Compare commits

...

39 commits

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| GitHub Actions Bot | 9c158240db | update M3U8 | 2026-04-18 13:02:15 -04:00 |
| GitHub Actions Bot | e42ac2187a | health log | 2026-04-18 12:11:31 -04:00 |
| GitHub Actions Bot | 08be0c463f | update M3U8 | 2026-04-18 12:02:47 -04:00 |
| GitHub Actions Bot | 608a6fe16c | health log | 2026-04-18 15:05:23 +00:00 |
| GitHub Actions Bot | be75371ab3 | update M3U8 | 2026-04-18 11:02:31 -04:00 |
| GitHub Actions Bot | b00df36aa1 | update M3U8 | 2026-04-18 10:02:07 -04:00 |
| GitHub Actions Bot | 6e0ae84ee6 | update EPG | 2026-04-18 11:21:41 +00:00 |
| GitHub Actions Bot | f45789051c | health log | 2026-04-18 09:11:11 +00:00 |
| GitHub Actions Bot | 1df3e6a796 | update M3U8 | 2026-04-18 01:38:00 -04:00 |
| GitHub Actions Bot | fccc3a3b83 | health log | 2026-04-18 01:37:47 -04:00 |
| doms9 | 00000d967c | e<br>- remove xstreameast.py<br>- add totalsportek mirror | 2026-04-18 01:29:27 -04:00 |
| GitHub Actions Bot | b98c834004 | update EPG | 2026-04-18 04:57:37 +00:00 |
| GitHub Actions Bot | 5a215666f2 | health log | 2026-04-18 04:44:19 +00:00 |
| GitHub Actions Bot | 9f9db85538 | update M3U8 | 2026-04-17 23:30:23 -04:00 |
| GitHub Actions Bot | cf7d0b0df2 | update M3U8 | 2026-04-17 23:01:02 -04:00 |
| GitHub Actions Bot | a21bfccad9 | update M3U8 | 2026-04-17 22:31:25 -04:00 |
| GitHub Actions Bot | 008005e62d | update M3U8 | 2026-04-17 22:00:44 -04:00 |
| GitHub Actions Bot | e9fc5ef75a | update M3U8 | 2026-04-17 21:30:58 -04:00 |
| GitHub Actions Bot | 54a10b4e4e | update M3U8 | 2026-04-17 21:01:12 -04:00 |
| GitHub Actions Bot | 2b125f7c0b | update M3U8 | 2026-04-17 20:31:32 -04:00 |
| GitHub Actions Bot | 3dc0f7f356 | update M3U8 | 2026-04-17 20:00:43 -04:00 |
| GitHub Actions Bot | e5ffd20c89 | update M3U8 | 2026-04-17 19:30:56 -04:00 |
| GitHub Actions Bot | 9bbb6d17a5 | update M3U8 | 2026-04-17 19:01:12 -04:00 |
| GitHub Actions Bot | 2dc3b1c4f2 | update M3U8 | 2026-04-17 18:31:14 -04:00 |
| GitHub Actions Bot | 9940dd5087 | update M3U8 | 2026-04-17 18:00:37 -04:00 |
| GitHub Actions Bot | dee7a3ed3b | health log | 2026-04-17 17:55:03 -04:00 |
| GitHub Actions Bot | 1568000003 | update M3U8 | 2026-04-17 17:31:09 -04:00 |
| GitHub Actions Bot | fcda459f6b | health log | 2026-04-17 21:09:06 +00:00 |
| GitHub Actions Bot | 0d5659a70b | update M3U8 | 2026-04-17 17:00:36 -04:00 |
| GitHub Actions Bot | 3a64e39f4a | update M3U8 | 2026-04-17 16:31:00 -04:00 |
| GitHub Actions Bot | 7e54363a8f | update M3U8 | 2026-04-17 16:01:28 -04:00 |
| GitHub Actions Bot | 73c4a235ec | update EPG | 2026-04-17 19:44:56 +00:00 |
| GitHub Actions Bot | 5d22ba3cf6 | update M3U8 | 2026-04-17 15:31:04 -04:00 |
| GitHub Actions Bot | a783f1098e | update M3U8 | 2026-04-17 15:01:26 -04:00 |
| GitHub Actions Bot | 5ca44cc1b7 | update M3U8 | 2026-04-17 14:31:30 -04:00 |
| doms9 | 00000d9cf1 | e<br>- misc edits. | 2026-04-17 14:29:46 -04:00 |
| GitHub Actions Bot | 714b781f4f | update M3U8 | 2026-04-17 14:02:11 -04:00 |
| GitHub Actions Bot | 5fd6a793e7 | update M3U8 | 2026-04-17 13:31:25 -04:00 |
| doms9 | 00000d929d | e<br>- change scraping method for streamhub.py | 2026-04-17 13:09:13 -04:00 |
10 changed files with 126480 additions and 125838 deletions

File diff suppressed because it is too large

M3U8/TV.xml

File diff suppressed because one or more lines are too long


@@ -306,8 +306,8 @@ http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/749
#EXTINF:-1 tvg-chno="102" tvg-id="NBA.TV.HD.us2" tvg-name="NBA TV" tvg-logo="http://schedulesdirect-api20141201-logos.s3.dualstack.us-east-1.amazonaws.com/stationLogos/s32281_dark_360w_270h.png" group-title="TV",NBA TV
http://212.102.60.231/NBA_TV/index.m3u8
#EXTINF:-1 tvg-chno="103" tvg-id="WNBC-DT.us_locals1" tvg-name="NBC" tvg-logo="http://schedulesdirect-api20141201-logos.s3.dualstack.us-east-1.amazonaws.com/stationLogos/s10991_dark_360w_270h.png" group-title="TV",NBC
http://stream.cammonitorplus.net/1812/index.m3u8
#EXTINF:-1 tvg-chno="103" tvg-id="WTVJ-DT.us_locals1" tvg-name="NBC" tvg-logo="http://schedulesdirect-api20141201-logos.s3.dualstack.us-east-1.amazonaws.com/stationLogos/s10991_dark_360w_270h.png" group-title="TV",NBC
http://stream.cammonitorplus.net/1804/index.m3u8
#EXTINF:-1 tvg-chno="104" tvg-id="NBC.Sports.Bay.Area.HD.us2" tvg-name="NBC Sports Bay Area" tvg-logo="http://schedulesdirect-api20141201-logos.s3.dualstack.us-east-1.amazonaws.com/stationLogos/s63138_dark_360w_270h.png" group-title="TV",NBC Sports Bay Area
http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/45785
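
For reference, a minimal sketch of how the `tvg-*` attributes in these `#EXTINF` lines can be pulled apart. The regex and helper below are illustrative only; they are not part of this repo, and the parser naively assumes no commas inside quoted values.

```python
import re

# Quoted key="value" attribute pairs inside an #EXTINF line.
EXTINF_ATTRS = re.compile(r'([\w-]+)="([^"]*)"')

def parse_extinf(line: str) -> tuple[dict[str, str], str]:
    """Return ({attr: value}, display_name) for one #EXTINF line."""
    attrs = dict(EXTINF_ATTRS.findall(line))
    display_name = line.rsplit(",", 1)[-1].strip()  # name follows the last comma
    return attrs, display_name

attrs, name = parse_extinf(
    '#EXTINF:-1 tvg-chno="102" tvg-id="NBA.TV.HD.us2" '
    'tvg-name="NBA TV" group-title="TV",NBA TV'
)
print(attrs["tvg-id"], name)  # NBA.TV.HD.us2 NBA TV
```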

File diff suppressed because it is too large


@@ -24,7 +24,6 @@ from scrapers import (
tvapp,
watchfooty,
webcast,
xstreameast,
)
from scrapers.utils import get_logger, network
@@ -65,6 +64,7 @@ async def main() -> None:
asyncio.create_task(embedhd.scrape(hdl_brwsr)),
asyncio.create_task(ppv.scrape(xtrnl_brwsr)),
asyncio.create_task(roxie.scrape(hdl_brwsr)),
asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
]
httpx_tasks = [
@@ -75,13 +75,11 @@ async def main() -> None:
asyncio.create_task(pawa.scrape()),
asyncio.create_task(shark.scrape()),
asyncio.create_task(streamcenter.scrape()),
asyncio.create_task(streamhub.scrape()),
asyncio.create_task(streamsgate.scrape()),
asyncio.create_task(streamtpnew.scrape()),
asyncio.create_task(totalsportek.scrape()),
asyncio.create_task(tvapp.scrape()),
asyncio.create_task(webcast.scrape()),
# asyncio.create_task(xstreameast.scrape()),
]
await asyncio.gather(*(pw_tasks + httpx_tasks))
@@ -117,7 +115,6 @@ async def main() -> None:
| tvapp.urls
| watchfooty.urls
| webcast.urls
| xstreameast.urls
)
live_events: list[str] = []
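
The net effect of this change: streamhub now runs in the Playwright task group and receives the shared browser, instead of scraping over plain HTTP, while xstreameast is dropped entirely. A minimal sketch of the two-group gather pattern, with stand-in scrapers rather than the repo's real modules:

```python
import asyncio

class StubScraper:
    """Stand-in for a scraper module; not the repo's real code."""

    def __init__(self, name: str) -> None:
        self.name = name

    async def scrape(self, browser: object | None = None) -> None:
        await asyncio.sleep(0)  # placeholder for real network work
        print(f"{self.name}: browser={'yes' if browser else 'no'}")

async def main() -> None:
    browser = object()  # stands in for the shared Playwright browser
    roxie, streamhub, shark = map(StubScraper, ["roxie", "streamhub", "shark"])

    pw_tasks = [  # scrapers that drive the browser take it as an argument
        asyncio.create_task(roxie.scrape(browser)),
        asyncio.create_task(streamhub.scrape(browser)),  # moved from httpx_tasks
    ]
    httpx_tasks = [  # plain-HTTP scrapers take no browser
        asyncio.create_task(shark.scrape()),
    ]
    # Both groups run concurrently; gather() waits for all of them.
    await asyncio.gather(*(pw_tasks + httpx_tasks))

asyncio.run(main())
```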

scrapers/streamhub.py

@@ -1,8 +1,8 @@
import asyncio
import re
from functools import partial
from urllib.parse import urljoin, urlparse
from urllib.parse import urljoin
from playwright.async_api import Browser, Page, TimeoutError
from selectolax.parser import HTMLParser
from .utils import Cache, Time, get_logger, leagues, network
@@ -34,59 +34,86 @@ SPORT_ENDPOINTS = [
]
async def process_event(url: str, url_num: int) -> tuple[str | None, str | None]:
if not (event_data := await network.request(url, log=log)):
log.warning(f"URL {url_num}) Failed to load url.")
return
async def process_event(
url: str,
url_num: int,
page: Page,
) -> str | None:
soup_1 = HTMLParser(event_data.content)
captured: list[str] = []
ifr_1 = soup_1.css_first("iframe#playerIframe")
got_one = asyncio.Event()
if not ifr_1 or not (src := ifr_1.attributes.get("src")):
log.warning(f"URL {url_num}) No iframe element found.")
return
parsed = urlparse(src)
ifr_1_src = urljoin(
BASE_URL,
f"embed1/{parsed.path.split('/')[-1].split('_')[0]}.php",
handler = partial(
network.capture_req,
captured=captured,
got_one=got_one,
)
if not (
ifr_1_src_data := await network.request(
ifr_1_src,
headers={"Referer": url},
log=log,
page.on("request", handler)
try:
resp = await page.goto(
url,
wait_until="domcontentloaded",
timeout=6_000,
)
):
log.warning(f"URL {url_num}) Failed to load iframe source. (IFR1)")
if not resp or resp.status != 200:
log.warning(
f"URL {url_num}) Status Code: {resp.status if resp else 'None'}"
)
return
try:
btn = page.locator("button.btn.btn-sm.btn-success.streamLink")
iframe_src = await btn.get_attribute("data-src", timeout=1_250)
except TimeoutError:
log.warning(f"URL {url_num}) No iframe source found.")
return
await page.goto(
iframe_src,
wait_until="domcontentloaded",
timeout=5_000,
)
wait_task = asyncio.create_task(got_one.wait())
try:
await asyncio.wait_for(wait_task, timeout=10)
except asyncio.TimeoutError:
log.warning(f"URL {url_num}) Timed out waiting for M3U8.")
return
finally:
if not wait_task.done():
wait_task.cancel()
try:
await wait_task
except asyncio.CancelledError:
pass
if captured:
log.info(f"URL {url_num}) Captured M3U8")
return captured[0]
log.warning(f"URL {url_num}) No M3U8 captured after waiting.")
return
soup_2 = HTMLParser(ifr_1_src_data.content)
except Exception as e:
log.warning(f"URL {url_num}) {e}")
ifr_2 = soup_2.css_first("center iframe")
if not ifr_2 or not (ifr_2_src := ifr_2.attributes.get("src")):
log.warning(f"URL {url_num}) Unable to locate iframe. (IFR2)")
return
ifr_2_src = f"https:{ifr_2_src}" if ifr_2_src.startswith("//") else ifr_2_src
if not (ifr_2_src_data := await network.request(ifr_2_src, log=log)):
log.warning(f"URL {url_num}) Failed to load iframe source.")
return
valid_m3u8 = re.compile(r"src:\s+(\'|\")([^\']+)(\'|\")", re.I)
if not (match := valid_m3u8.search(ifr_2_src_data.text)):
log.warning(f"URL {url_num}) No source found.")
return
log.info(f"URL {url_num}) Captured M3U8")
return match[2]
finally:
page.remove_listener("request", handler)
async def refresh_html_cache(
@@ -185,7 +212,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
return live
async def scrape() -> None:
async def scrape(browser: Browser) -> None:
cached_urls = CACHE_FILE.load()
valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
@@ -200,48 +227,51 @@ async def scrape() -> None:
if events := await get_events(cached_urls.keys()):
log.info(f"Processing {len(events)} new URL(s)")
async with network.event_context(browser) as context:
for i, ev in enumerate(events, start=1):
async with network.event_page(context) as page:
for i, ev in enumerate(events, start=1):
handler = partial(
process_event,
url=(link := ev["link"]),
url_num=i,
page=page,
)
handler = partial(
process_event,
url=(link := ev["link"]),
url_num=i,
)
url = await network.safe_process(
handler,
url_num=i,
semaphore=network.PW_S,
log=log,
)
url = await network.safe_process(
handler,
url_num=i,
semaphore=network.PW_S,
log=log,
)
sport, event, ts = (
ev["sport"],
ev["event"],
ev["event_ts"],
)
sport, event, ts = (
ev["sport"],
ev["event"],
ev["event_ts"],
)
key = f"[{sport}] {event} ({TAG})"
key = f"[{sport}] {event} ({TAG})"
tvg_id, logo = leagues.get_tvg_info(sport, event)
tvg_id, logo = leagues.get_tvg_info(sport, event)
entry = {
"url": url,
"logo": logo,
"base": "https://hardsmart.click",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
entry = {
"url": url,
"logo": logo,
"base": "https://hardsmart.click",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
"UA": "curl/8.19.0",
}
cached_urls[key] = entry
cached_urls[key] = entry
if url:
valid_count += 1
if url:
valid_count += 1
entry["url"] = url.split("?st")[0]
urls[key] = entry
urls[key] = entry
log.info(f"Collected and cached {valid_count - cached_count} new event(s)")

scrapers/totalsportek.py

@@ -11,11 +11,12 @@ log = get_logger(__name__)
urls: dict[str, dict[str, str | float]] = {}
TAG = "TOTALSPRTK"
CACHE_FILE = Cache("TSPRTK", exp=28_800)
CACHE_FILE = Cache(TAG, exp=28_800)
BASE_URL = "https://live3.totalsportek.fyi"
BASES = {
"TSPRTK1": "https://live.totalsportek.fyi",
"TSPRTK3": "https://live3.totalsportek.fyi",
}
def fix_txt(s: str) -> str:
@@ -24,36 +25,43 @@ def fix_txt(s: str) -> str:
return s.upper() if s.islower() else s
async def process_event(url: str, url_num: int) -> str | None:
if not (event_data := await network.request(url, log=log)):
log.warning(f"URL {url_num}) Failed to load url.")
async def process_ts1(ifr_src: str, url_num: int) -> str | None:
if not (ifr_src_data := await network.request(ifr_src, log=log)):
log.info(f"URL {url_num}) Failed to load iframe source.")
return
soup_1 = HTMLParser(event_data.content)
valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.I)
iframe_1 = soup_1.css_first("iframe")
if not iframe_1 or not (iframe_1_src := iframe_1.attributes.get("src")):
log.warning(f"URL {url_num}) No iframe element found. (IFR1)")
if not (match := valid_m3u8.search(ifr_src_data.text)):
log.warning(f"URL {url_num}) No Clappr source found.")
return
if not (iframe_1_src_data := await network.request(iframe_1_src, log=log)):
if len(encoded := match[2]) < 20:
encoded = match[3]
log.info(f"URL {url_num}) Captured M3U8")
return bytes.fromhex(encoded).decode("utf-8")
async def process_ts3(ifr_src: str, url_num: int) -> str | None:
if not (ifr_1_src_data := await network.request(ifr_src, log=log)):
log.warning(f"URL {url_num}) Failed to load iframe source. (IFR1)")
return
soup_2 = HTMLParser(iframe_1_src_data.content)
soup_2 = HTMLParser(ifr_1_src_data.content)
iframe_2 = soup_2.css_first("iframe")
ifr_2 = soup_2.css_first("iframe")
if not iframe_2 or not (iframe_2_src := iframe_2.attributes.get("src")):
if not ifr_2 or not (ifr_2_src := ifr_2.attributes.get("src")):
log.warning(f"URL {url_num}) No iframe element found. (IFR2)")
return
if not (
iframe_2_src_data := await network.request(
iframe_2_src,
ifr_2_src_data := await network.request(
ifr_2_src,
headers={"Referer": ifr_src},
log=log,
headers={"Referer": iframe_1_src},
)
):
log.warning(f"URL {url_num}) Failed to load iframe source. (IFR2)")
@@ -61,7 +69,7 @@ async def process_event(url: str, url_num: int) -> str | None:
valid_m3u8 = re.compile(r'currentStreamUrl\s+=\s+"([^"]*)"', re.I)
if not (match := valid_m3u8.search(iframe_2_src_data.text)):
if not (match := valid_m3u8.search(ifr_2_src_data.text)):
log.warning(f"URL {url_num}) No Clappr source found.")
return
@@ -70,52 +78,74 @@ async def process_event(url: str, url_num: int) -> str | None:
return json.loads(f'"{match[1]}"')
async def process_event(url: str, url_num: int, tag: str) -> str | None:
if not (event_data := await network.request(url, log=log)):
log.warning(f"URL {url_num}) Failed to load url.")
return
soup = HTMLParser(event_data.content)
iframe = soup.css_first("iframe")
if not iframe or not (iframe_src := iframe.attributes.get("src")):
log.warning(f"URL {url_num}) No valid iframe source found.")
return
return (
await process_ts1(iframe_src, url_num)
if tag == "TSPRTK1"
else await process_ts3(iframe_src, url_num)
)
async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
events = []
if not (html_data := await network.request(BASE_URL, log=log)):
if not (html_data := await network.request(BASES["TSPRTK1"], log=log)):
return events
soup = HTMLParser(html_data.content)
sport = "Live Event"
for node in soup.css("a"):
if not node.attributes.get("class"):
continue
for tag, url in BASES.items():
for node in soup.css("a"):
if not node.attributes.get("class"):
continue
if (parent := node.parent) and "my-1" in parent.attributes.get("class", ""):
if span := node.css_first("span"):
sport = span.text(strip=True)
if (parent := node.parent) and "my-1" in parent.attributes.get("class", ""):
if span := node.css_first("span"):
sport = span.text(strip=True)
sport = fix_txt(sport)
sport = fix_txt(sport)
if not (teams := [t.text(strip=True) for t in node.css(".col-7 .col-12")]):
continue
if not (teams := [t.text(strip=True) for t in node.css(".col-7 .col-12")]):
continue
if not (href := node.attributes.get("href")):
continue
if not (href := node.attributes.get("href")):
continue
href = urlparse(href).path if href.startswith("http") else href
href = urlparse(href).path if href.startswith("http") else href
if not (time_node := node.css_first(".col-3 span")):
continue
if not (time_node := node.css_first(".col-3 span")):
continue
if time_node.text(strip=True).lower() != "matchstarted":
continue
if time_node.text(strip=True).lower() != "matchstarted":
continue
event_name = fix_txt(" vs ".join(teams))
event_name = fix_txt(" vs ".join(teams))
if f"[{sport}] {event_name} ({TAG})" in cached_keys:
continue
if f"[{sport}] {event_name} ({tag})" in cached_keys:
continue
events.append(
{
"sport": sport,
"event": event_name,
"link": urljoin(f"{html_data.url}", href),
}
)
events.append(
{
"sport": sport,
"event": event_name,
"tag": tag,
"link": urljoin(url, href),
}
)
return events
@@ -131,7 +161,7 @@ async def scrape() -> None:
log.info(f"Loaded {cached_count} event(s) from cache")
log.info(f'Scraping from "{BASE_URL}"')
log.info('Scraping from "https://live.totalsportek.fyi"')
if events := await get_events(cached_urls.keys()):
log.info(f"Processing {len(events)} new URL(s)")
@@ -143,6 +173,7 @@ async def scrape() -> None:
process_event,
url=(link := ev["link"]),
url_num=i,
tag=(tag := ev["tag"]),
)
url = await network.safe_process(
@@ -154,7 +185,7 @@ async def scrape() -> None:
sport, event = ev["sport"], ev["event"]
key = f"[{sport}] {event} ({TAG})"
key = f"[{sport}] {event} ({tag})"
tvg_id, logo = leagues.get_tvg_info(sport, event)
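
The TSPRTK1 path above recovers the playlist from a hex string assigned to a JS variable; `bytes.fromhex()` turns it back into a URL. A small sketch of that decode step, using the same regex as the diff but a made-up embed snippet:

```python
import re

# Made-up embed JS; real pages assign a hex string to some var/const.
sample_js = 'const k = "68747470733a2f2f6578616d706c652e636f6d2f6c6976652e6d337538";'

valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.I)
if match := valid_m3u8.search(sample_js):
    # Group 2 is the variable name, group 3 the quoted value; the scraper
    # treats whichever one is long enough as the hex payload.
    if len(encoded := match[2]) < 20:
        encoded = match[3]
    print(bytes.fromhex(encoded).decode("utf-8"))
    # -> https://example.com/live.m3u8
```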


@@ -45,6 +45,8 @@ async def get_events() -> list[dict[str, str]]:
if not (html_data := await network.request(BASE_URL, log=log)):
return events
now = Time.clean(Time.now())
soup = HTMLParser(html_data.content)
for row in soup.css(".row"):
@@ -55,9 +57,14 @@ async def get_events() -> list[dict[str, str]]:
continue
for a in row.css("a.list-group-item[href]"):
splits = a.text(strip=True).split(":")
x, y = a.text(strip=True).split(":", 1)
event_name = ":".join(splits[:2]).split("@")[0].strip()
event_name = x.split("@")[0].strip()
event_dt = Time.from_str(y.split(":", 1)[-1], timezone="UTC")
if event_dt.date() != now.date():
continue
if not (href := a.attributes.get("href")):
continue
@@ -67,6 +74,7 @@ async def get_events() -> list[dict[str, str]]:
"sport": sport,
"event": event_name,
"link": urljoin(f"{html_data.url}", href),
"timestamp": now.timestamp(),
}
)
@@ -86,8 +94,6 @@ async def scrape() -> None:
if events := await get_events():
log.info(f"Processing {len(events)} URL(s)")
now = Time.clean(Time.now())
for i, ev in enumerate(events, start=1):
handler = partial(
process_event,
@@ -102,7 +108,11 @@ async def scrape() -> None:
log=log,
)
sport, event = ev["sport"], ev["event"]
sport, event, ts = (
ev["sport"],
ev["event"],
ev["timestamp"],
)
key = f"[{sport}] {event} ({TAG})"
@@ -112,7 +122,7 @@ async def scrape() -> None:
"url": url,
"logo": logo,
"base": BASE_URL,
"timestamp": now.timestamp(),
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
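
The change above parses each listing's time once in `get_events()`, keeps only same-day events, and caches the timestamp with the entry instead of recomputing `now` in `scrape()`. A rough sketch of the same-day filter; the listing text format and the datetime layout here are assumptions, and the repo's `Time` helper differs in detail:

```python
from datetime import datetime, timezone

def same_day_event(text: str) -> tuple[str, float] | None:
    """Return (event_name, timestamp) if the listing is dated today (UTC)."""
    name_part, time_part = text.split(":", 1)  # split only on the first colon
    event_dt = datetime.strptime(time_part.strip(), "%Y-%m-%d %H:%M").replace(
        tzinfo=timezone.utc
    )
    if event_dt.date() != datetime.now(timezone.utc).date():
        return None  # stale or future-day listing: skip it
    # Mirror the diff: the event name is the part before "@".
    return name_part.split("@")[0].strip(), event_dt.timestamp()

print(same_day_event("Team A @ Team B : 2026-04-18 14:00"))  # hypothetical listing
```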

scrapers/xstreameast.py

@@ -1,179 +0,0 @@
import asyncio
import re
from functools import partial
from urllib.parse import urljoin
from selectolax.parser import HTMLParser
from .utils import Cache, Time, get_logger, leagues, network
log = get_logger(__name__)
urls: dict[str, dict[str, str | float]] = {}
TAG = "XSTRMEST"
CACHE_FILE = Cache(TAG, exp=10_800)
BASE_URL = "https://xstreameast.com"
SPORT_URLS = [
urljoin(BASE_URL, f"categories/{sport}/")
for sport in [
# "mlb",
"mma",
"nba",
# "nfl",
# "nhl",
"soccer",
"wwe",
]
]
async def process_event(url: str, url_num: int) -> tuple[str | None, str | None]:
nones = None, None
if not (html_data := await network.request(url, log=log)):
log.warning(f"URL {url_num}) Failed to load url.")
return nones
soup = HTMLParser(html_data.content)
iframe = soup.css_first("iframe")
if not iframe or not (iframe_src := iframe.attributes.get("src")):
log.warning(f"URL {url_num}) No iframe element found.")
return nones
elif iframe_src == "about:blank":
log.warning(f"URL {url_num}) No iframe element found.")
return nones
if not (iframe_src_data := await network.request(iframe_src, log=log)):
log.warning(f"URL {url_num}) Failed to load iframe source.")
return nones
valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.I)
if not (match := valid_m3u8.search(iframe_src_data.text)):
log.warning(f"URL {url_num}) No Clappr source found.")
return nones
if len(encoded := match[2]) < 20:
encoded = match[3]
log.info(f"URL {url_num}) Captured M3U8")
return bytes.fromhex(encoded).decode("utf-8"), iframe_src
async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
tasks = [network.request(url, log=log) for url in SPORT_URLS]
results = await asyncio.gather(*tasks)
events = []
if not (soups := [HTMLParser(html.content) for html in results if html]):
return events
sport = "Live Event"
for soup in soups:
if sport_header := soup.css_first("h1.text-3xl"):
header = sport_header.text(strip=True)
sport = header.split("Streams")[0].strip()
for card in soup.css("article.game-card"):
if not (team_elem := card.css_first("h2.text-xl.font-semibold")):
continue
if not (link_elem := card.css_first("a.stream-button")) or not (
href := link_elem.attributes.get("href")
):
continue
if (
not (live_badge := card.css_first("span.bg-green-600"))
or live_badge.text(strip=True) != "LIVE"
):
continue
event_name = team_elem.text(strip=True)
if f"[{sport}] {event_name} ({TAG})" in cached_keys:
continue
events.append(
{
"sport": sport,
"event": event_name,
"link": href,
}
)
return events
async def scrape() -> None:
cached_urls = CACHE_FILE.load()
valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
valid_count = cached_count = len(valid_urls)
urls.update(valid_urls)
log.info(f"Loaded {cached_count} event(s) from cache")
log.info(f'Scraping from "{BASE_URL}"')
if events := await get_events(cached_urls.keys()):
log.info(f"Processing {len(events)} new URL(s)")
now = Time.clean(Time.now())
for i, ev in enumerate(events, start=1):
handler = partial(
process_event,
url=(link := ev["link"]),
url_num=i,
)
url, iframe = await network.safe_process(
handler,
url_num=i,
semaphore=network.HTTP_S,
log=log,
)
sport, event = ev["sport"], ev["event"]
key = f"[{sport}] {event} ({TAG})"
tvg_id, logo = leagues.get_tvg_info(sport, event)
entry = {
"url": url,
"logo": logo,
"base": iframe,
"timestamp": now.timestamp(),
"id": tvg_id or "Live.Event.us",
"link": link,
}
cached_urls[key] = entry
if url:
valid_count += 1
urls[key] = entry
log.info(f"Collected and cached {valid_count - cached_count} new event(s)")
else:
log.info("No new events found")
CACHE_FILE.write(cached_urls)


@@ -1,16 +1,16 @@
## Base Log @ 2026-04-17 16:09 UTC
## Base Log @ 2026-04-18 16:10 UTC
### ✅ Working Streams: 151<br>❌ Dead Streams: 10
| Channel | Error (Code) | Link |
| ------- | ------------ | ---- |
| Discovery Life | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/201208` |
| FDSN SoCal | HTTP Error (404) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/296681` |
| FDSN Southeast | HTTP Error (404) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/82301` |
| FX Movie Channel | HTTP Error (404) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/39873` |
| getTV | HTTP Error (404) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/194187` |
| Grit TV | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/15086` |
| Hallmark Family | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/304609` |
| Hallmark Mystery | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/3388` |
| HBO Family | HTTP Error (404) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/760` |
| INSP | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/30900` |
| TLC | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/2362` |
| TV Land | HTTP Error (502) | `http://aflaxtv.xyz:8080/mitrovic/19106b7cb4/2364` |