From 00000d91dcde123fe2a257bf1fed88abbf01388d Mon Sep 17 00:00:00 2001
From: doms9 <96013514+doms9@users.noreply.github.com>
Date: Thu, 30 Oct 2025 19:54:06 -0400
Subject: [PATCH] roxie: scrape M3U8 links with httpx instead of Playwright

roxie: fetch each event page with httpx and pull the M3U8 URL out of
the showPlayer() call with a regex, dropping Playwright for this
scraper. streameast: use the Brave profile directly instead of falling
back to the default browser. strmd: pass browser="brave" as a keyword
argument to match network.browser()'s signature.
---
 M3U8/scrapers/roxie.py      | 82 ++++++++++++++++++++++---------------
 M3U8/scrapers/streameast.py |  5 +--
 M3U8/scrapers/strmd.py      |  2 +-
 3 files changed, 52 insertions(+), 37 deletions(-)

diff --git a/M3U8/scrapers/roxie.py b/M3U8/scrapers/roxie.py
index 8116b7b..c560a27 100644
--- a/M3U8/scrapers/roxie.py
+++ b/M3U8/scrapers/roxie.py
@@ -1,10 +1,10 @@
 import asyncio
+import re
 from functools import partial
 from pathlib import Path
 from urllib.parse import urljoin
 
 import httpx
-from playwright.async_api import async_playwright
 from selectolax.parser import HTMLParser
 
 from .utils import Cache, Time, get_logger, leagues, network
@@ -25,6 +25,31 @@
 CACHE_FILE = Cache(Path(__file__).parent / "caches" / "roxie.json", exp=10_800)
 HTML_CACHE = Cache(Path(__file__).parent / "caches" / "roxie_html.json", exp=28_800)
 
+async def process_event(
+    client: httpx.AsyncClient,
+    url: str,
+    url_num: int,
+) -> str | None:
+
+    try:
+        r = await client.get(url)
+        r.raise_for_status()
+    except Exception as e:
+        log.error(f'URL {url_num}) Failed to fetch "{url}": {e}')
+        return
+
+    valid_m3u8 = re.compile(
+        r"showPlayer\(['\"]clappr['\"],\s*['\"]([^'\"]+?\.m3u8(?:\?[^'\"]*)?)['\"]\)",
+        re.IGNORECASE,
+    )
+
+    if match := valid_m3u8.search(r.text):
+        log.info(f"URL {url_num}) Captured M3U8")
+        return match[1]
+
+    log.info(f"URL {url_num}) No M3U8 found")
+
+
 async def refresh_html_cache(
     client: httpx.AsyncClient,
     url: str,
@@ -135,43 +160,36 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")
 
     if events:
-        async with async_playwright() as p:
-            browser, context = await network.browser(p)
+        for i, ev in enumerate(events, start=1):
+            handler = partial(
+                process_event,
+                client=client,
+                url=ev["link"],
+                url_num=i,
+            )
 
-            for i, ev in enumerate(events, start=1):
-                handler = partial(
-                    network.process_event,
-                    url=ev["link"],
-                    url_num=i,
-                    context=context,
-                    timeout=15,
-                    log=log,
-                )
+            url = await network.safe_process(
+                handler,
+                url_num=i,
+                log=log,
+            )
 
-                url = await network.safe_process(
-                    handler,
-                    url_num=i,
-                    log=log,
-                )
+            if url:
+                sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]
 
-                if url:
-                    sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]
+                tvg_id, logo = leagues.info(sport)
 
-                    tvg_id, logo = leagues.info(sport)
+                key = f"[{sport}] {event} (ROXIE)"
 
-                    key = f"[{sport}] {event} (ROXIE)"
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": "",
+                    "timestamp": ts,
+                    "id": tvg_id or "Live.Event.us",
+                }
 
-                    entry = {
-                        "url": url,
-                        "logo": logo,
-                        "base": "",
-                        "timestamp": ts,
-                        "id": tvg_id or "Live.Event.us",
-                    }
-
-                    urls[key] = cached_urls[key] = entry
-
-        await browser.close()
+                urls[key] = cached_urls[key] = entry
 
     if new_count := len(cached_urls) - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
diff --git a/M3U8/scrapers/streameast.py b/M3U8/scrapers/streameast.py
index f0817d4..dd30c79 100644
--- a/M3U8/scrapers/streameast.py
+++ b/M3U8/scrapers/streameast.py
@@ -124,10 +124,7 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     if events:
         async with async_playwright() as p:
-            try:
-                browser, context = await network.browser(p, browser="brave")
-            except Exception:
-                browser, context = await network.browser(p)
+            browser, context = await network.browser(p, browser="brave")
 
             for i, ev in enumerate(events, start=1):
                 handler = partial(
diff --git a/M3U8/scrapers/strmd.py b/M3U8/scrapers/strmd.py
index 3d65c26..1f94fab 100644
--- a/M3U8/scrapers/strmd.py
+++ b/M3U8/scrapers/strmd.py
@@ -150,7 +150,7 @@ async def scrape(client: httpx.AsyncClient) -> None:
 
     if events:
         async with async_playwright() as p:
-            browser, context = await network.browser(p, "brave")
+            browser, context = await network.browser(p, browser="brave")
 
             for i, ev in enumerate(events, start=1):
                 handler = partial(
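-- 
A quick, self-contained sanity check for the showPlayer() regex that the
new process_event() relies on. The pattern is copied verbatim from the
roxie.py hunk above; the sample snippet and token value are invented for
illustration, not taken from a real event page.

    import re

    # Same pattern as in process_event(): capture the first .m3u8 URL
    # (including any query string) handed to showPlayer('clappr', ...).
    valid_m3u8 = re.compile(
        r"showPlayer\(['\"]clappr['\"],\s*['\"]([^'\"]+?\.m3u8(?:\?[^'\"]*)?)['\"]\)",
        re.IGNORECASE,
    )

    # Hypothetical page content for the demo.
    sample = "<script>showPlayer('clappr', 'https://example.com/live/feed.m3u8?token=abc');</script>"

    if match := valid_m3u8.search(sample):
        print(match[1])  # -> https://example.com/live/feed.m3u8?token=abc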