From 00000d9afd33aebeecd42b9698b040470432cffe Mon Sep 17 00:00:00 2001
From: doms9 <96013514+doms9@users.noreply.github.com>
Date: Fri, 5 Dec 2025 17:29:19 -0500
Subject: [PATCH] Add webcast scraper and log HTML cache refreshes

---
 M3U8/fetch.py            |   3 +
 M3U8/scrapers/roxie.py   |   2 +
 M3U8/scrapers/shark.py   |   2 +
 M3U8/scrapers/webcast.py | 179 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 186 insertions(+)
 create mode 100644 M3U8/scrapers/webcast.py

diff --git a/M3U8/fetch.py b/M3U8/fetch.py
index bd04992..2f610a4 100644
--- a/M3U8/fetch.py
+++ b/M3U8/fetch.py
@@ -17,6 +17,7 @@ from scrapers import (
     strmd,
     tvpass,
     watchfooty,
+    webcast,
 )

 from scrapers.utils import get_logger, network
@@ -58,6 +59,7 @@ async def main() -> None:
         asyncio.create_task(strmd.scrape(network.client)),
         asyncio.create_task(tvpass.scrape(network.client)),
         asyncio.create_task(watchfooty.scrape(network.client)),
+        asyncio.create_task(webcast.scrape(network.client)),
     ]

     await asyncio.gather(*tasks)
@@ -76,6 +78,7 @@ async def main() -> None:
         | streamfree.urls
         | tvpass.urls
         | watchfooty.urls
+        | webcast.urls
     )

     live_events: list[str] = []
diff --git a/M3U8/scrapers/roxie.py b/M3U8/scrapers/roxie.py
index 2b47e1f..db23726 100644
--- a/M3U8/scrapers/roxie.py
+++ b/M3U8/scrapers/roxie.py
@@ -62,6 +62,8 @@ async def refresh_html_cache(
     now_ts: float,
 ) -> dict[str, dict[str, str | float]]:

+    log.info("Refreshing HTML cache")
+
     try:
         r = await client.get(url)
         r.raise_for_status()
diff --git a/M3U8/scrapers/shark.py b/M3U8/scrapers/shark.py
index 9875d15..73154c7 100644
--- a/M3U8/scrapers/shark.py
+++ b/M3U8/scrapers/shark.py
@@ -50,6 +50,8 @@ async def refresh_html_cache(
     now_ts: float,
 ) -> dict[str, dict[str, str | float]]:

+    log.info("Refreshing HTML cache")
+
     try:
         r = await client.get(url)
         r.raise_for_status()
diff --git a/M3U8/scrapers/webcast.py b/M3U8/scrapers/webcast.py
new file mode 100644
index 0000000..039b0a5
--- /dev/null
+++ b/M3U8/scrapers/webcast.py
@@ -0,0 +1,179 @@
+import asyncio
+from functools import partial
+
+import httpx
+from playwright.async_api import async_playwright
+from selectolax.parser import HTMLParser
+
+from .utils import Cache, Time, get_logger, leagues, network
+
+log = get_logger(__name__)
+
+urls: dict[str, dict[str, str | float]] = {}
+
+CACHE_FILE = Cache("webcast.json", exp=3_600)
+
+HTML_CACHE = Cache("webcast-html.json", exp=28_800)
+
+BASE_URLS = {"NFL": "https://nflwebcast.com", "NHL": "https://slapstreams.com"}
+
+TAG = "WEBCST"
+
+
+def fix_event(s: str) -> str:
+    return " vs ".join(s.split("@"))
+
+
+async def refresh_html_cache(
+    client: httpx.AsyncClient, url: str
+) -> dict[str, dict[str, str | float]]:
+    log.info("Refreshing HTML cache")
+
+    try:
+        r = await client.get(url)
+        r.raise_for_status()
+    except Exception as e:
+        log.error(f'Failed to fetch "{url}": {e}')
+
+        return {}
+
+    now = Time.now()
+
+    soup = HTMLParser(r.content)
+
+    events = {}
+
+    title = soup.css_first("title").text(strip=True)
+
+    sport = "NFL" if "NFL" in title else "NHL"
+
+    date_text = now.strftime("%B %d, %Y")
+
+    if date_row := soup.css_first("tr.mdatetitle"):
+        if mtdate_span := date_row.css_first("span.mtdate"):
+            date_text = mtdate_span.text(strip=True)
+
+    for row in soup.css("tr.singele_match_date"):
+        if not (time_node := row.css_first("td.matchtime")):
+            continue
+
+        time = time_node.text(strip=True)
+
+        if not (vs_node := row.css_first("td.teamvs a")):
+            continue
+
+        event_name = vs_node.text(strip=True)
+
+        for span in vs_node.css("span.mtdate"):
+            date = span.text(strip=True)
+
+            event_name = event_name.replace(date, "").strip()
"").strip() + + if not (href := vs_node.attributes.get("href")): + continue + + event_dt = Time.from_str(f"{date_text} {time} PM", timezone="EST") + + event = fix_event(event_name) + + key = f"[{sport}] {event} ({TAG})" + + events[key] = { + "sport": sport, + "event": event, + "link": href, + "event_ts": event_dt.timestamp(), + "timestamp": now.timestamp(), + } + + return events + + +async def get_events( + client: httpx.AsyncClient, cached_keys: set[str] +) -> list[dict[str, str]]: + now = Time.clean(Time.now()) + + if not (events := HTML_CACHE.load()): + tasks = [refresh_html_cache(client, url) for url in BASE_URLS.values()] + + results = await asyncio.gather(*tasks) + + events = {k: v for data in results for k, v in data.items()} + + HTML_CACHE.write(events) + + live = [] + + start_ts = now.delta(minutes=-30).timestamp() + end_ts = now.delta(minutes=30).timestamp() + + for k, v in events.items(): + if cached_keys & {k}: + continue + + if not start_ts <= v["event_ts"] <= end_ts: + continue + + live.append({**v}) + + return live + + +async def scrape(client: httpx.AsyncClient) -> None: + cached_urls = CACHE_FILE.load() + cached_count = len(cached_urls) + urls.update(cached_urls) + + log.info(f"Loaded {cached_count} event(s) from cache") + + log.info(f'Scraping from "{' & '.join(BASE_URLS.values())}"') + + events = await get_events(client, set(cached_urls.keys())) + + log.info(f"Processing {len(events)} new URL(s)") + + if events: + async with async_playwright() as p: + browser, context = await network.browser(p) + + for i, ev in enumerate(events, start=1): + handler = partial( + network.process_event, + url=ev["link"], + url_num=i, + context=context, + log=log, + ) + + url = await network.safe_process( + handler, + url_num=i, + log=log, + ) + + if url: + sport, event, ts = ev["sport"], ev["event"], ev["event_ts"] + + key = f"[{sport}] {event} ({TAG})" + + tvg_id, logo = leagues.get_tvg_info(sport, event) + + entry = { + "url": url, + "logo": logo, + "base": BASE_URLS[sport], + "timestamp": ts, + "id": tvg_id or "Live.Event.us", + } + + urls[key] = cached_urls[key] = entry + + await browser.close() + + if new_count := len(cached_urls) - cached_count: + log.info(f"Collected and cached {new_count} new event(s)") + else: + log.info("No new events found") + + CACHE_FILE.write(cached_urls)