Compare commits


No commits in common. "00000d98127ffac1d35187e40438940da517df2b" and "61b05a758782881b93f3f9111fce39f3d761dec8" have entirely different histories.

18 changed files with 119150 additions and 118712 deletions

File diff suppressed because it is too large.

M3U8/TV.xml (235535 changed lines)
File diff suppressed because one or more lines are too long.

File diff suppressed because it is too large.

@@ -67,11 +67,11 @@ async def main() -> None:
         asyncio.create_task(ppv.scrape(xtrnl_brwsr)),
         asyncio.create_task(sport9.scrape(xtrnl_brwsr)),
         asyncio.create_task(streamcenter.scrape(xtrnl_brwsr)),
-        asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
+        # asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
         asyncio.create_task(streamsgate.scrape(xtrnl_brwsr)),
         asyncio.create_task(totalsportek.scrape(hdl_brwsr)),
         asyncio.create_task(tvapp.scrape(hdl_brwsr)),
-        asyncio.create_task(webcast.scrape(hdl_brwsr)),
+        # asyncio.create_task(webcast.scrape(hdl_brwsr)),
     ]
     httpx_tasks = [

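The streamhub and webcast scrapers are disabled here by commenting out their `create_task` calls, so those tasks are simply never created. For context, a minimal sketch of the surrounding pattern, assuming the task list is later awaited with `asyncio.gather` (the awaiting code is not part of this diff, and the `scrape` stub is hypothetical):

```python
import asyncio

async def scrape(name: str) -> None:
    # Stand-in for a real scraper coroutine.
    print(f"scraping {name}")

async def main() -> None:
    tasks = [
        asyncio.create_task(scrape("ppv")),
        asyncio.create_task(scrape("sport9")),
        # asyncio.create_task(scrape("streamhub")),  # disabled: task never created
    ]
    # Tasks start running as soon as they are created; gather just waits for them.
    await asyncio.gather(*tasks)

asyncio.run(main())
```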

@@ -25,7 +25,7 @@ async def process_event(url: str, url_num: int) -> str | None:
     valid_m3u8 = re.compile(
         r'var\s+(\w+)\s*=\s*\[["\']?(https?:\/\/[^"\'\s>]+\.m3u8(?:\?[^"\'\s>]*)?)["\']\]?',
-        re.I,
+        re.IGNORECASE,
     )
     if not (match := valid_m3u8.search(html_data.text)):
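`re.I` is the documented short alias for `re.IGNORECASE`, so this substitution (repeated in most of the files below) is purely cosmetic; the compiled patterns are identical:

```python
import re

# re.I and re.IGNORECASE are the same flag object, so behavior is unchanged.
assert re.I is re.IGNORECASE
assert re.compile(r"m3u8", re.I).flags == re.compile(r"m3u8", re.IGNORECASE).flags
```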
@@ -89,11 +89,9 @@ async def scrape() -> None:
     cached_hrefs = {entry["href"] for entry in cached_urls.values()}
-    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    valid_count = cached_count = len(valid_urls)
-    urls.update(valid_urls)
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -120,6 +118,7 @@ async def scrape() -> None:
                 log=log,
             )
+            if url:
             sport, event, link = (
                 ev["sport"],
                 ev["event"],
@@ -140,14 +139,9 @@ async def scrape() -> None:
                 "link": link,
             }
-            cached_urls[key] = entry
-            if url:
-                valid_count += 1
-                urls[key] = entry
+            urls[key] = cached_urls[key] = entry
-    if new_count := valid_count - cached_count:
+    if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:

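The refactor above stops caching events whose URL lookup failed: the old code cached every entry and filtered out url-less ones on the next load (`valid_urls`), while the new code writes an entry only under `if url:` and derives the new-event count from how much the cache dict grew. A minimal sketch of the new bookkeeping, with made-up entries standing in for `CACHE_FILE.load()` and the scraper results:

```python
# Hypothetical cache contents; real entries come from CACHE_FILE.load().
cached_urls = {"[Soccer] A vs B (TAG)": {"url": "https://example.com/a.m3u8"}}
cached_count = len(cached_urls)

urls: dict[str, dict[str, str]] = {}
urls.update(cached_urls)

# During scraping, an event with a resolved URL lands in both dicts at once:
url = "https://example.com/c.m3u8"  # hypothetical process_event() result
if url:
    entry = {"url": url}
    urls["[Soccer] C vs D (TAG)"] = cached_urls["[Soccer] C vs D (TAG)"] = entry

# New events are simply the growth of the cache dict; the walrus keeps it terse.
if new_count := len(cached_urls) - cached_count:
    print(f"Collected and cached {new_count} new event(s)")
else:
    print("No new events found")
```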

@@ -40,7 +40,7 @@ async def process_event(url: str, url_num: int) -> str | None:
         return
-    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.I)
+    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.IGNORECASE)
     if not (match := pattern.search(iframe_src_data.text)):
         log.warning(f"URL {url_num}) No Clappr source found.")
@@ -58,7 +58,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     if not (html_data := await network.request(BASE_URL, log=log)):
         return events
-    pattern = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.I)
+    pattern = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.IGNORECASE)
     soup = HTMLParser(html_data.content)
@@ -106,11 +106,9 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]
 async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
-    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    valid_count = cached_count = len(valid_urls)
-    urls.update(valid_urls)
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -137,6 +135,7 @@ async def scrape() -> None:
                 log=log,
             )
+            if url:
             sport, event, link = (
                 ev["sport"],
                 ev["event"],
@@ -156,14 +155,9 @@ async def scrape() -> None:
                 "link": link,
             }
-            cached_urls[key] = entry
-            if url:
-                valid_count += 1
-                urls[key] = entry
+            urls[key] = cached_urls[key] = entry
-    if new_count := valid_count - cached_count:
+    if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:


@@ -184,7 +184,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append(v)
+        live.append({**v})
     return live

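`{**v}` appends a shallow copy of each cached event rather than the cached dict itself, so callers can mutate the returned entries without corrupting the cache (nested mutable values would still be shared). A quick illustration:

```python
cache = {"k": {"event": "A vs B", "url": None}}

aliased = [v for v in cache.values()]       # old behavior: same dict objects
copied = [{**v} for v in cache.values()]    # new behavior: shallow copies

copied[0]["url"] = "https://example.com/a.m3u8"
aliased[0]["event"] = "mutated"

print(cache["k"])  # {'event': 'mutated', 'url': None}; the copy left it alone
```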

@@ -41,7 +41,7 @@ async def process_event(url: str, url_num: int) -> str | None:
         return
-    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.I)
+    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.IGNORECASE)
     if not (match := pattern.search(iframe_src_data.text)):
         log.warning(f"URL {url_num}) No Clappr source found.")
@@ -89,11 +89,9 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]
 async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
-    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    valid_count = cached_count = len(valid_urls)
-    urls.update(valid_urls)
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -120,6 +118,7 @@ async def scrape() -> None:
                 log=log,
             )
+            if url:
             sport, event, link = (
                 ev["sport"],
                 ev["event"],
@@ -139,14 +138,9 @@ async def scrape() -> None:
                 "link": link,
             }
-            cached_urls[key] = entry
-            if url:
-                valid_count += 1
-                urls[key] = entry
+            urls[key] = cached_urls[key] = entry
-    if new_count := valid_count - cached_count:
+    if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:


@@ -34,7 +34,7 @@ async def process_event(url: str, url_num: int) -> str | None:
     if not (html_data := await network.request(url, log=log)):
         return
-    valid_m3u8 = re.compile(r"'clappr',\s+'([^\"]*)'", re.I)
+    valid_m3u8 = re.compile(r"'clappr',\s+'([^\"]*)'", re.IGNORECASE)
     if not (match := valid_m3u8.search(html_data.text)):
         log.info(f"URL {url_num}) No M3U8 found")
@@ -125,7 +125,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append(v)
+        live.append({**v})
     return live
@@ -133,11 +133,9 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]
 async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
-    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    valid_count = cached_count = len(valid_urls)
-    urls.update(valid_urls)
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -162,6 +160,7 @@ async def scrape() -> None:
                 log=log,
             )
+            if url:
             sport, event, ts, link = (
                 ev["sport"],
                 ev["event"],
@@ -182,14 +181,9 @@ async def scrape() -> None:
                 "link": link,
             }
-            cached_urls[key] = entry
-            if url:
-                valid_count += 1
-                urls[key] = entry
+            urls[key] = cached_urls[key] = entry
-    if new_count := valid_count - cached_count:
+    if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:


@@ -44,7 +44,7 @@ async def refresh_html_cache(now_ts: float) -> dict[str, dict[str, str | float]]
     if not (html_data := await network.request(BASE_URL, log=log)):
         return events
-    pattern = re.compile(r"openEmbed\('([^']+)'\)", re.I)
+    pattern = re.compile(r"openEmbed\('([^']+)'\)", re.IGNORECASE)
     soup = HTMLParser(html_data.content)
@@ -106,7 +106,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append(v)
+        live.append({**v})
     return live


@@ -45,7 +45,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     for card in soup.css("a.match-card"):
         live_badge = card.css_first(".live-badge")
-        if not live_badge or live_badge.text(strip=True).lower() != "live":
+        if not live_badge or live_badge.text(strip=True) != "Live":
             continue
         if not (sport_node := card.css_first(".tournament-name")):

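Dropping `.lower()` makes the badge check case-sensitive: only the exact text `Live` passes now, and a badge rendered as `LIVE` or `live` would be skipped. Presumably this matches the site's actual markup:

```python
def is_live(badge_text: str) -> bool:
    # New, case-sensitive check against the exact badge text.
    return badge_text == "Live"

print(is_live("Live"))  # True
print(is_live("LIVE"))  # False; this would have passed the old .lower() check
```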

@@ -19,7 +19,7 @@ BASE_URL = "https://hiteasport.info"
 def fix_league(s: str) -> str:
-    pattern = re.compile(r"^\w*-\w*", re.I)
+    pattern = re.compile(r"^\w*-\w*", re.IGNORECASE)
     return " ".join(s.split("-")) if pattern.search(s) else s
@@ -28,7 +28,7 @@ async def process_event(url: str, url_num: int) -> str | None:
     if not (html_data := await network.request(url, log=log)):
         return
-    valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.I)
+    valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)
     if not (match := valid_m3u8.search(html_data.text)):
         log.info(f"URL {url_num}) No M3U8 found")

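For reference, `valid_m3u8` here captures the first JavaScript `var name = "..."` assignment in the page source. A small demo against a hypothetical page snippet (the real HTML comes from `network.request(url)`):

```python
import re

valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)

# Hypothetical page text; any var-assignment of a quoted string would match.
html = 'var src = "https://example.com/stream.m3u8";'
if match := valid_m3u8.search(html):
    name, url = match.groups()
    print(name, url)  # src https://example.com/stream.m3u8
```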

@@ -130,7 +130,7 @@ async def get_events(url: str, cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append(v)
+        live.append({**v})
     return live


@@ -25,7 +25,7 @@ def fix_url(s: str) -> str:
     return urljoin(f"http://{base}", parsed.path.replace("tracks-v1a1/", ""))
-async def get_events() -> list[dict[str, str]]:
+async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     events = []
     if not (html_data := await network.request(BASE_URL, log=log)):
@@ -45,6 +45,9 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     for a in row.css("a.list-group-item[href]"):
+        event_name = a.text(strip=True).split(":", 1)[0]
+        if f"[{sport}] {event_name} ({TAG})" in cached_keys:
+            continue
         if not (href := a.attributes.get("href")):
             continue
@@ -60,16 +63,17 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]
 async def scrape(browser: Browser) -> None:
-    if cached := CACHE_FILE.load():
-        urls.update(cached)
-        log.info(f"Loaded {len(urls)} event(s) from cache")
-        return
+    cached_urls = CACHE_FILE.load()
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
+    log.info(f"Loaded {cached_count} event(s) from cache")
     log.info(f'Scraping from "{BASE_URL}"')
-    events = await get_events()
+    events = await get_events(cached_urls.keys())
     log.info(f"Processing {len(events)} new URL(s)")
@@ -114,8 +118,12 @@ async def scrape(browser: Browser) -> None:
             "link": link,
         }
-        urls[key] = entry
+        urls[key] = cached_urls[key] = entry
-    log.info(f"Collected and cached {len(urls)} new event(s)")
-    CACHE_FILE.write(urls)
+    if new_count := len(cached_urls) - cached_count:
+        log.info(f"Collected and cached {new_count} new event(s)")
+    else:
+        log.info("No new events found")
+    CACHE_FILE.write(cached_urls)

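This is a behavior change, not just a cleanup: the old `scrape()` returned early whenever the cache was non-empty, so a warm cache meant nothing new was ever collected. The new flow always scrapes and instead skips per-event duplicates inside `get_events()` by checking cache keys. A sketch of that check, assuming the `[{sport}] {event_name} ({TAG})` key format visible in the hunk (the names and data here are hypothetical):

```python
TAG = "XX"  # hypothetical scraper tag

def new_events(found: list[tuple[str, str]], cached_keys: set[str]) -> list[str]:
    events = []
    for sport, event_name in found:
        key = f"[{sport}] {event_name} ({TAG})"
        if key in cached_keys:  # already collected on a previous run
            continue
        events.append(key)
    return events

cached = {"[Soccer] A vs B (XX)"}
print(new_events([("Soccer", "A vs B"), ("Soccer", "C vs D")], cached))
# ['[Soccer] C vs D (XX)']
```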

@@ -28,6 +28,8 @@ class Network:
     PW_S = asyncio.Semaphore(3)
+    proxy_base = "https://stream.nvrmind.xyz"
     def __init__(self) -> None:
         self.client = httpx.AsyncClient(
             timeout=httpx.Timeout(5.0),

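`proxy_base` is added as a class attribute, so every `Network` instance shares it. The existing `httpx.Timeout(5.0)` gives connect, read, write, and pool acquisition a 5-second budget each. A minimal, self-contained sketch of the same setup, assuming only that `httpx` is installed:

```python
import asyncio
import httpx

class Network:
    PW_S = asyncio.Semaphore(3)  # presumably caps concurrent browser-driven fetches
    proxy_base = "https://stream.nvrmind.xyz"  # class attribute: shared by all instances

    def __init__(self) -> None:
        # A bare 5.0 applies the same 5 s limit to connect, read, write,
        # and pool acquisition.
        self.client = httpx.AsyncClient(timeout=httpx.Timeout(5.0))

async def main() -> None:
    net = Network()
    print(net.client.timeout)   # Timeout(timeout=5.0)
    print(Network.proxy_base)   # readable without an instance
    await net.client.aclose()

asyncio.run(main())
```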

@@ -108,7 +108,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append(v)
+        live.append({**v})
     return live


@@ -30,7 +30,7 @@ SPORT_ENDPOINTS = [
 async def process_event(url: str, url_num: int) -> tuple[str | None, str | None]:
-    valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.I)
+    valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)
     nones = None, None


@@ -1,10 +1,11 @@
-## Base Log @ 2026-02-13 04:32 UTC
+## Base Log @ 2026-02-12 04:35 UTC
-### ✅ Working Streams: 145<br>❌ Dead Streams: 1
+### ✅ Working Streams: 144<br>❌ Dead Streams: 2
 | Channel | Error (Code) | Link |
 | ------- | ------------ | ---- |
 | Hallmark Mystery | HTTP Error (403) | `http://mytvstream.net:8080/live/30550113/30550113/10289.m3u8` |
+| NFL RedZone | HTTP Error (000) | `http://mytvstream.net:8080/live/30550113/30550113/159118.m3u8` |
 ---
 #### Base Channels URL
 ```