From 00000d90b75cf0476031ba51540f4cff8cd29f2a Mon Sep 17 00:00:00 2001
From: doms9 <96013514+doms9@users.noreply.github.com>
Date: Wed, 29 Oct 2025 03:21:18 -0400
Subject: [PATCH] e

---
 M3U8/fetch.py                 |   3 +
 M3U8/scrapers/fstv.py         |  54 ++++----
 M3U8/scrapers/ppv.py          |  58 +++++----
 M3U8/scrapers/roxie.py        | 226 ++++++++++++++++++++++++++++++++++
 M3U8/scrapers/streambtw.py    |  48 ++++----
 M3U8/scrapers/streameast.py   |  52 ++++----
 M3U8/scrapers/streamed.py     |  50 +++++---
 M3U8/scrapers/strmd.py        |  58 +++++----
 M3U8/scrapers/strmfree.py     |  58 +++++----
 M3U8/scrapers/utils/config.py |  21 ++--
 M3U8/scrapers/watchfooty.py   |  60 +++++----
 11 files changed, 501 insertions(+), 187 deletions(-)
 create mode 100644 M3U8/scrapers/roxie.py

diff --git a/M3U8/fetch.py b/M3U8/fetch.py
index 4c0131d..5b003c7 100644
--- a/M3U8/fetch.py
+++ b/M3U8/fetch.py
@@ -5,6 +5,7 @@ from pathlib import Path
 from scrapers import (
     fstv,
     ppv,
+    roxie,
     streambtw,
     streameast,
     streamed,
@@ -40,6 +41,7 @@ async def main() -> None:
     tasks = [
         asyncio.create_task(fstv.scrape(network.client)),
         asyncio.create_task(ppv.scrape(network.client)),
+        asyncio.create_task(roxie.scrape(network.client)),
         asyncio.create_task(streambtw.scrape(network.client)),
         asyncio.create_task(streameast.scrape(network.client)),
         asyncio.create_task(streamed.scrape(network.client)),
@@ -54,6 +56,7 @@ async def main() -> None:
     additions = (
         fstv.urls
         | ppv.urls
+        | roxie.urls
         | streambtw.urls
         | streameast.urls
         | streamed.urls
diff --git a/M3U8/scrapers/fstv.py b/M3U8/scrapers/fstv.py
index d2469c9..48013f1 100644
--- a/M3U8/scrapers/fstv.py
+++ b/M3U8/scrapers/fstv.py
@@ -134,32 +134,44 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    now = Time.now().timestamp()
+    if events:
+        now = Time.now().timestamp()
 
-    for i, ev in enumerate(events, start=1):
-        handler = partial(process_event, client=client, url=ev["link"], url_num=i)
-
-        match_name, url = await network.safe_process(handler, url_num=i, log=log)
-
-        if url:
-            sport = ev["sport"]
-
-            key = (
-                f"[{sport}] {match_name} (FSTV)" if match_name else f"[{sport}] (FSTV)"
+        for i, ev in enumerate(events, start=1):
+            handler = partial(
+                process_event,
+                client=client,
+                url=ev["link"],
+                url_num=i,
             )
 
-            tvg_id, logo = leagues.info(sport)
+            match_name, url = await network.safe_process(
+                handler,
+                url_num=i,
+                log=log,
+            )
 
-            entry = {
-                "url": url,
-                "logo": logo,
-                "base": base_url,
-                "timestamp": now,
-                "id": tvg_id or "Live.Event.us",
-                "href": ev["href"],
-            }
+            if url:
+                sport = ev["sport"]
 
-            urls[key] = cached_urls[key] = entry
+                key = (
+                    f"[{sport}] {match_name} (FSTV)"
+                    if match_name
+                    else f"[{sport}] (FSTV)"
+                )
+
+                tvg_id, logo = leagues.info(sport)
+
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": base_url,
+                    "timestamp": now,
+                    "id": tvg_id or "Live.Event.us",
+                    "href": ev["href"],
+                }
+
+                urls[key] = cached_urls[key] = entry
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/ppv.py b/M3U8/scrapers/ppv.py
index 056639b..caa105d 100644
--- a/M3U8/scrapers/ppv.py
+++ b/M3U8/scrapers/ppv.py
@@ -180,37 +180,47 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    async with async_playwright() as p:
-        browser, context = await network.browser(p)
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p)
 
-        for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, url=ev["link"], url_num=i, context=context)
-
-            url = await network.safe_process(handler, url_num=i, log=log)
-
-            if url:
-                sport, event, logo, ts = (
-                    ev["sport"],
-                    ev["event"],
-                    ev["logo"],
-                    ev["timestamp"],
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
                 )
 
-                key = f"[{sport}] {event} (PPV)"
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
 
-                tvg_id, pic = leagues.get_tvg_info(sport, event)
+                if url:
+                    sport, event, logo, ts = (
+                        ev["sport"],
+                        ev["event"],
+                        ev["logo"],
+                        ev["timestamp"],
+                    )
 
-                entry = {
-                    "url": url,
-                    "logo": logo or pic,
-                    "base": base_url,
-                    "timestamp": ts,
-                    "id": tvg_id or "Live.Event.us",
-                }
+                    key = f"[{sport}] {event} (PPV)"
 
-                urls[key] = cached_urls[key] = entry
+                    tvg_id, pic = leagues.get_tvg_info(sport, event)
 
-        await browser.close()
+                    entry = {
+                        "url": url,
+                        "logo": logo or pic,
+                        "base": base_url,
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
+
+                    urls[key] = cached_urls[key] = entry
+
+            await browser.close()
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/roxie.py b/M3U8/scrapers/roxie.py
new file mode 100644
index 0000000..8f0d07a
--- /dev/null
+++ b/M3U8/scrapers/roxie.py
@@ -0,0 +1,226 @@
+import asyncio
+from functools import partial
+from pathlib import Path
+from urllib.parse import urljoin
+
+import httpx
+from playwright.async_api import BrowserContext, async_playwright
+from selectolax.parser import HTMLParser
+
+from .utils import Cache, Time, get_logger, leagues, network
+
+log = get_logger(__name__)
+
+urls: dict[str, dict[str, str]] = {}
+
+BASE_URL = "https://roxiestreams.cc"
+
+SPORT_URLS = {
+    sport: urljoin(BASE_URL, sport.lower())
+    for sport in ["Soccer", "MLB", "NBA", "NFL", "Fighting", "Motorsports"]
+}
+
+CACHE_FILE = Cache(Path(__file__).parent / "caches" / "roxie.json", exp=10_800)
+
+HTML_CACHE = Cache(Path(__file__).parent / "caches" / "roxie_html.json", exp=86_400)
+
+
+async def process_event(
+    url: str,
+    url_num: int,
+    context: BrowserContext,
+) -> str | None:
+
+    page = await context.new_page()
+
+    captured: list[str] = []
+
+    got_one = asyncio.Event()
+
+    handler = partial(network.capture_req, captured=captured, got_one=got_one)
+
+    page.on("request", handler)
+
+    try:
+        await page.goto(
+            url,
+            wait_until="domcontentloaded",
+            timeout=15_000,
+        )
+
+        wait_task = asyncio.create_task(got_one.wait())
+
+        try:
+            await asyncio.wait_for(wait_task, timeout=6)
+        except asyncio.TimeoutError:
+            log.warning(f"URL {url_num}) Timed out waiting for M3U8.")
+            return
+
+        finally:
+            if not wait_task.done():
+                wait_task.cancel()
+
+                try:
+                    await wait_task
+                except asyncio.CancelledError:
+                    pass
+
+        if captured:
+            log.info(f"URL {url_num}) Captured M3U8")
+            return captured[-1]
+
+        log.warning(f"URL {url_num}) No M3U8 captured after waiting.")
+        return
+
+    except Exception as e:
+        log.warning(f"URL {url_num}) Exception while processing: {e}")
+        return
+
+    finally:
+        page.remove_listener("request", handler)
+        await page.close()
+
+
+async def refresh_html_cache(
+    client: httpx.AsyncClient,
+    url: str,
+    sport: str,
+) -> dict[str, str | float]:
+
+    try:
+        r = await client.get(url)
+        r.raise_for_status()
+    except Exception as e:
+        log.error(f'Failed to fetch "{url}": {e}')
+
+        return {}
+
+    soup = HTMLParser(r.text)
+
+    events = {}
+
+    for row in soup.css("table#eventsTable tbody tr"):
+        a_tag = row.css_first("td a")
+
+        if not a_tag:
+            continue
+
+        event = a_tag.text(strip=True)
+
+        event_link = a_tag.attributes.get("href")
+
+        if not (span := row.css_first("span.countdown-timer")):
+            continue
+
+        data_start = span.attributes["data-start"]
+
+        event_dt = Time.from_str(f"{data_start} PST", "%B %d, %Y %H:%M:%S")
+
+        key = f"[{sport}] {event} (ROXIE)"
+
+        events[key] = {
+            "sport": sport,
+            "event": event,
+            "link": event_link,
+            "event_ts": event_dt.timestamp(),
+            "timestamp": Time.now().timestamp(),
+        }
+
+    return events
+
+
+async def get_events(
+    client: httpx.AsyncClient,
+    sport_urls: dict[str, str],
+    cached_keys: set[str],
+) -> list[dict[str, str]]:
+
+    if not (events := HTML_CACHE.load()):
+        tasks = [
+            refresh_html_cache(client, url, sport) for sport, url in sport_urls.items()
+        ]
+
+        results = await asyncio.gather(*tasks)
+
+        events = {k: v for data in results for k, v in data.items()}
+
+        HTML_CACHE.write(events)
+
+    live = []
+
+    now = Time.clean(Time.now())
+    start_ts = now.delta(minutes=-30).timestamp()
+    end_ts = now.delta(minutes=30).timestamp()
+
+    for k, v in events.items():
+        if cached_keys & {k}:
+            continue
+
+        if not start_ts <= v["event_ts"] <= end_ts:
+            continue
+
+        live.append({**v})
+
+    return live
+
+
+async def scrape(client: httpx.AsyncClient) -> None:
+    cached_urls = CACHE_FILE.load()
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
+
+    log.info(f"Loaded {cached_count} event(s) from cache")
+
+    log.info(f'Scraping from "{BASE_URL}"')
+
+    events = await get_events(
+        client,
+        SPORT_URLS,
+        set(cached_urls.keys()),
+    )
+
+    log.info(f"Processing {len(events)} new URL(s)")
+
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p)
+
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )
+
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
+
+                if url:
+                    sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]
+
+                    tvg_id, logo = leagues.info(sport)
+
+                    key = f"[{sport}] {event} (ROXIE)"
+
+                    entry = {
+                        "url": url,
+                        "logo": logo,
+                        "base": "",
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
+
+                    urls[key] = cached_urls[key] = entry
+
+            await browser.close()
+
+    if new_count := len(cached_urls) - cached_count:
+        log.info(f"Collected and cached {new_count} new event(s)")
+    else:
+        log.info("No new events found")
+
+    CACHE_FILE.write(cached_urls)
diff --git a/M3U8/scrapers/streambtw.py b/M3U8/scrapers/streambtw.py
index 53e6635..3833310 100644
--- a/M3U8/scrapers/streambtw.py
+++ b/M3U8/scrapers/streambtw.py
@@ -88,34 +88,40 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    now = Time.now().timestamp()
+    if events:
+        now = Time.now().timestamp()
 
-    for i, ev in enumerate(events, start=1):
-        handler = partial(process_event, client=client, url=ev["link"], url_num=i)
+        for i, ev in enumerate(events, start=1):
+            handler = partial(
+                process_event,
+                client=client,
+                url=ev["link"],
+                url_num=i,
+            )
 
-        url = await network.safe_process(
-            handler,
-            url_num=i,
-            log=log,
-            timeout=10,
-        )
+            url = await network.safe_process(
+                handler,
+                url_num=i,
+                log=log,
+                timeout=10,
+            )
 
-        if url:
-            sport, event = ev["sport"], ev["event"]
+            if url:
+                sport, event = ev["sport"], ev["event"]
 
-            key = f"[{sport}] {event} (SBTW)"
+                key = f"[{sport}] {event} (SBTW)"
 
-            tvg_id, logo = leagues.info(sport)
+                tvg_id, logo = leagues.info(sport)
 
-            entry = {
-                "url": url,
-                "logo": logo,
-                "base": BASE_URL,
-                "timestamp": now,
-                "id": tvg_id or "Live.Event.us",
-            }
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": BASE_URL,
+                    "timestamp": now,
+                    "id": tvg_id or "Live.Event.us",
+                }
 
-            urls[key] = entry
+                urls[key] = entry
 
     log.info(f"Collected {len(urls)} event(s)")
diff --git a/M3U8/scrapers/streameast.py b/M3U8/scrapers/streameast.py
index 798e69c..4d20d21 100644
--- a/M3U8/scrapers/streameast.py
+++ b/M3U8/scrapers/streameast.py
@@ -174,36 +174,46 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    async with async_playwright() as p:
-        browser, context = await network.browser(p, browser="brave")
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p, browser="brave")
 
-        for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )
 
-            url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
 
-            if url:
-                sport, event, ts = ev["sport"], ev["event"], ev["timestamp"]
+                if url:
+                    sport, event, ts = ev["sport"], ev["event"], ev["timestamp"]
 
-                tvg_id, logo = leagues.info(sport)
+                    tvg_id, logo = leagues.info(sport)
 
-                if sport == "NBA" and leagues.is_valid(event, "WNBA"):
-                    sport = "WNBA"
-                    tvg_id, logo = leagues.info("WNBA")
+                    if sport == "NBA" and leagues.is_valid(event, "WNBA"):
+                        sport = "WNBA"
+                        tvg_id, logo = leagues.info("WNBA")
 
-                key = f"[{sport}] {event} (SEAST)"
+                    key = f"[{sport}] {event} (SEAST)"
 
-                entry = {
-                    "url": url,
-                    "logo": logo,
-                    "base": "https://embedsports.top/",
-                    "timestamp": ts,
-                    "id": tvg_id or "Live.Event.us",
-                }
+                    entry = {
+                        "url": url,
+                        "logo": logo,
+                        "base": "https://embedsports.top/",
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
 
-                urls[key] = cached_urls[key] = entry
+                    urls[key] = cached_urls[key] = entry
 
-        await browser.close()
+            await browser.close()
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/streamed.py b/M3U8/scrapers/streamed.py
index 0d21377..83fb861 100644
--- a/M3U8/scrapers/streamed.py
+++ b/M3U8/scrapers/streamed.py
@@ -72,7 +72,9 @@ async def process_event(
     await page.close()
 
 
-async def refresh_html_cache(client: httpx.AsyncClient, url: str) -> dict[str, str]:
+async def refresh_html_cache(
+    client: httpx.AsyncClient, url: str
+) -> dict[str, str | float]:
     try:
         r = await client.get(url)
         r.raise_for_status()
@@ -172,32 +174,42 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    async with async_playwright() as p:
-        browser, context = await network.browser(p, browser="brave")
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p, browser="brave")
 
-        for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )
 
-            url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
 
-            if url:
-                sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]
+                if url:
+                    sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]
 
-                tvg_id, logo = leagues.info(sport)
+                    tvg_id, logo = leagues.info(sport)
 
-                key = f"[{sport}] {event} (STRMD)"
+                    key = f"[{sport}] {event} (STRMD)"
 
-                entry = {
-                    "url": url,
-                    "logo": logo,
-                    "base": "",
-                    "timestamp": ts,
-                    "id": tvg_id or "Live.Event.us",
-                }
+                    entry = {
+                        "url": url,
+                        "logo": logo,
+                        "base": "",
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
 
-                urls[key] = cached_urls[key] = entry
+                    urls[key] = cached_urls[key] = entry
 
-        await browser.close()
+            await browser.close()
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/strmd.py b/M3U8/scrapers/strmd.py
index a6a0dff..f2e50ad 100644
--- a/M3U8/scrapers/strmd.py
+++ b/M3U8/scrapers/strmd.py
@@ -206,37 +206,47 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    async with async_playwright() as p:
-        browser, context = await network.browser(p, "brave")
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p, "brave")
 
-        for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, url=ev["link"], url_num=i, context=context)
-
-            url = await network.safe_process(handler, url_num=i, log=log)
-
-            if url:
-                sport, event, logo, ts = (
-                    ev["sport"],
-                    ev["event"],
-                    ev["logo"],
-                    ev["timestamp"],
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
                 )
 
-                key = f"[{sport}] {event} (STRMD)"
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
 
-                tvg_id, pic = leagues.get_tvg_info(sport, event)
+                if url:
+                    sport, event, logo, ts = (
+                        ev["sport"],
+                        ev["event"],
+                        ev["logo"],
+                        ev["timestamp"],
+                    )
 
-                entry = {
-                    "url": url,
-                    "logo": logo or pic,
-                    "base": "https://embedsports.top/",
-                    "timestamp": ts,
-                    "id": tvg_id or "Live.Event.us",
-                }
+                    key = f"[{sport}] {event} (STRMD)"
 
-                urls[key] = cached_urls[key] = entry
+                    tvg_id, pic = leagues.get_tvg_info(sport, event)
 
-        await browser.close()
+                    entry = {
+                        "url": url,
+                        "logo": logo or pic,
+                        "base": "https://embedsports.top/",
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
+
+                    urls[key] = cached_urls[key] = entry
+
+            await browser.close()
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/strmfree.py b/M3U8/scrapers/strmfree.py
index 4447c47..a72123d 100644
--- a/M3U8/scrapers/strmfree.py
+++ b/M3U8/scrapers/strmfree.py
@@ -166,37 +166,47 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    async with async_playwright() as p:
-        browser, context = await network.browser(p)
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p)
 
-        for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, url=ev["link"], url_num=i, context=context)
-
-            url = await network.safe_process(handler, url_num=i, log=log)
-
-            if url:
-                sport, event, logo, ts = (
-                    ev["sport"],
-                    ev["event"],
-                    ev["logo"],
-                    ev["timestamp"],
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
                 )
 
-                key = f"[{sport}] {event} (STRMFR)"
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
 
-                tvg_id, pic = leagues.get_tvg_info(sport, event)
+                if url:
+                    sport, event, logo, ts = (
+                        ev["sport"],
+                        ev["event"],
+                        ev["logo"],
+                        ev["timestamp"],
+                    )
 
-                entry = {
-                    "url": url,
-                    "logo": logo or pic,
-                    "base": "",
-                    "timestamp": ts,
-                    "id": tvg_id or "Live.Event.us",
-                }
+                    key = f"[{sport}] {event} (STRMFR)"
 
-                urls[key] = cached_urls[key] = entry
+                    tvg_id, pic = leagues.get_tvg_info(sport, event)
 
-        await browser.close()
+                    entry = {
+                        "url": url,
+                        "logo": logo or pic,
+                        "base": "",
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
+
+                    urls[key] = cached_urls[key] = entry
+
+            await browser.close()
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/utils/config.py b/M3U8/scrapers/utils/config.py
index 458a322..cfc6672 100644
--- a/M3U8/scrapers/utils/config.py
+++ b/M3U8/scrapers/utils/config.py
@@ -5,12 +5,16 @@ from pathlib import Path
 
 import pytz
 
-ZONES = {"ET": pytz.timezone("America/New_York"), "UTC": timezone.utc}
-
-ZONES["EDT"] = ZONES["EST"] = ZONES["ET"]
-
 
 class Time(datetime):
+    ZONES = {
+        "ET": pytz.timezone("America/New_York"),
+        "PST": pytz.timezone("America/Los_Angeles"),
+        "UTC": timezone.utc,
+    }
+
+    ZONES["EDT"] = ZONES["EST"] = ZONES["ET"]
+
     TZ = ZONES["ET"]
 
     @classmethod
@@ -39,8 +43,8 @@ class Time(datetime):
         )
 
     def to_tz(self, tzone: str) -> "Time":
-        dt = self.astimezone(ZONES[tzone])
-        return self.__class__.fromtimestamp(dt.timestamp(), tz=ZONES[tzone])
+        dt = self.astimezone(self.ZONES[tzone])
+        return self.__class__.fromtimestamp(dt.timestamp(), tz=self.ZONES[tzone])
 
     @classmethod
     def from_str(
@@ -49,16 +53,17 @@ class Time(datetime):
         fmt: str | None = None,
     ) -> "Time":
 
-        pattern = re.compile(r"\b(ET|UTC|EST|EDT)\b")
+        pattern = re.compile(r"\b(ET|UTC|EST|EDT|PST)\b")
 
         match = pattern.search(s)
 
-        tz = ZONES.get(match[1]) if match else cls.TZ
+        tz = cls.ZONES.get(match[1]) if match else cls.TZ
 
         cleaned_str = pattern.sub("", s).strip()
 
         if fmt:
             dt = datetime.strptime(cleaned_str, fmt)
+
         else:
             formats = [
                 "%Y-%m-%d %H:%M",
diff --git a/M3U8/scrapers/watchfooty.py b/M3U8/scrapers/watchfooty.py
index 774f309..061a3cc 100644
--- a/M3U8/scrapers/watchfooty.py
+++ b/M3U8/scrapers/watchfooty.py
@@ -224,40 +224,50 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     log.info(f"Processing {len(events)} new URL(s)")
 
-    async with async_playwright() as p:
-        browser, context = await network.browser(p)
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p)
 
-        for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )
 
-            url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
 
-            sport, event, logo, ts = (
-                ev["sport"],
-                ev["event"],
-                ev["logo"],
-                ev["timestamp"],
-            )
+                sport, event, logo, ts = (
+                    ev["sport"],
+                    ev["event"],
+                    ev["logo"],
+                    ev["timestamp"],
+                )
 
-            key = f"[{sport}] {event} (WFTY)"
+                key = f"[{sport}] {event} (WFTY)"
 
-            tvg_id, pic = leagues.get_tvg_info(sport, event)
+                tvg_id, pic = leagues.get_tvg_info(sport, event)
 
-            entry = {
-                "url": url,
-                "logo": logo or pic,
-                "base": base_url,
-                "timestamp": ts,
-                "id": tvg_id or "Live.Event.us",
-            }
+                entry = {
+                    "url": url,
+                    "logo": logo or pic,
+                    "base": base_url,
+                    "timestamp": ts,
+                    "id": tvg_id or "Live.Event.us",
+                }
 
-            cached_urls[key] = entry
+                cached_urls[key] = entry
 
-            if url:
-                valid_count += 1
-                urls[key] = entry
+                if url:
+                    valid_count += 1
+                    urls[key] = entry
 
-        await browser.close()
+            await browser.close()
 
     if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
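
Note on the capture technique the new roxie.py scraper shares with the other Playwright-based scrapers: a page is opened, every outgoing request is inspected, and the first URL containing ".m3u8" sets an asyncio.Event so the scraper can stop waiting early instead of sleeping for a fixed interval. The sketch below is a minimal, self-contained illustration of that pattern using only the public Playwright async API; the names capture_m3u8 and grab_stream_url are illustrative stand-ins, not the repo's network.capture_req / network.safe_process helpers.

import asyncio

from playwright.async_api import Request, async_playwright


def capture_m3u8(request: Request, captured: list[str], got_one: asyncio.Event) -> None:
    # Record any request whose URL looks like an HLS playlist and wake the waiter.
    if ".m3u8" in request.url:
        captured.append(request.url)
        got_one.set()


async def grab_stream_url(page_url: str, wait: float = 6.0) -> str | None:
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        page = await browser.new_page()

        captured: list[str] = []
        got_one = asyncio.Event()
        handler = lambda req: capture_m3u8(req, captured, got_one)

        page.on("request", handler)

        try:
            await page.goto(page_url, wait_until="domcontentloaded", timeout=15_000)

            try:
                # Stop as soon as one playlist request is seen, or give up after `wait` seconds.
                await asyncio.wait_for(got_one.wait(), timeout=wait)
            except asyncio.TimeoutError:
                return None

            return captured[-1] if captured else None

        finally:
            page.remove_listener("request", handler)
            await browser.close()

A call such as asyncio.run(grab_stream_url("https://example.com/embed/123")) (hypothetical URL) returns the last captured playlist URL or None. Listening for requests rather than parsing the embed HTML keeps this approach working even when the playlist URL is assembled by obfuscated JavaScript on the page.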