From 00000d9c9af87a7e2414601a283bff738dc2af15 Mon Sep 17 00:00:00 2001 From: doms9 <96013514+doms9@users.noreply.github.com> Date: Fri, 5 Dec 2025 04:10:29 -0500 Subject: [PATCH] e --- M3U8/fetch.py | 6 +- M3U8/scrapers/{ => old}/streameast.py | 352 +++++++++++++------------- M3U8/scrapers/streamcenter.py | 174 +++++++++++++ 3 files changed, 353 insertions(+), 179 deletions(-) rename M3U8/scrapers/{ => old}/streameast.py (96%) create mode 100644 M3U8/scrapers/streamcenter.py diff --git a/M3U8/fetch.py b/M3U8/fetch.py index 23b747a..bd04992 100644 --- a/M3U8/fetch.py +++ b/M3U8/fetch.py @@ -12,7 +12,7 @@ from scrapers import ( shark, sport9, streambtw, - streameast, + streamcenter, streamfree, strmd, tvpass, @@ -53,7 +53,7 @@ async def main() -> None: asyncio.create_task(shark.scrape(network.client)), asyncio.create_task(sport9.scrape(network.client)), asyncio.create_task(streambtw.scrape(network.client)), - #asyncio.create_task(streameast.scrape(network.client)), + asyncio.create_task(streamcenter.scrape(network.client)), asyncio.create_task(streamfree.scrape(network.client)), asyncio.create_task(strmd.scrape(network.client)), asyncio.create_task(tvpass.scrape(network.client)), @@ -71,7 +71,7 @@ async def main() -> None: | shark.urls | sport9.urls | streambtw.urls - | streameast.urls + | streamcenter.urls | strmd.urls | streamfree.urls | tvpass.urls diff --git a/M3U8/scrapers/streameast.py b/M3U8/scrapers/old/streameast.py similarity index 96% rename from M3U8/scrapers/streameast.py rename to M3U8/scrapers/old/streameast.py index 870ba9a..67931ec 100644 --- a/M3U8/scrapers/streameast.py +++ b/M3U8/scrapers/old/streameast.py @@ -1,176 +1,176 @@ -from functools import partial -from urllib.parse import urljoin - -import httpx -from playwright.async_api import async_playwright -from selectolax.parser import HTMLParser - -from .utils import Cache, Time, get_logger, leagues, network - -log = get_logger(__name__) - -urls: dict[str, dict[str, str | float]] = {} - -CACHE_FILE = Cache("streameast.json", exp=10_800) - -prefixes = { - "ga": None, - "ph": None, - "sg": None, - "ch": None, - "ec": None, - "fi": None, - "ms": None, - "ps": None, - "cf": None, - "sk": None, - "co": "the", - "fun": "the", - "ru": "the", - "su": "the", -} - -MIRRORS = [ - *[f"https://streameast.{ext}" for ext in prefixes if not prefixes[ext]], - *[f"https://thestreameast.{ext}" for ext in prefixes if prefixes[ext] == "the"], -] - -TAG = "STRMEST" - - -async def get_events( - client: httpx.AsyncClient, - url: str, - cached_keys: set[str], -) -> list[dict[str, str]]: - - try: - r = await client.get(url) - r.raise_for_status() - except Exception as e: - log.error(f'Failed to fetch "{url}": {e}') - - return [] - - soup = HTMLParser(r.content) - - events = [] - - now = Time.clean(Time.now()) - start_dt = now.delta(minutes=-30) - end_dt = now.delta(minutes=30) - - for section in soup.css("div.se-sport-section"): - if not (sport := section.attributes.get("data-sport-name", "").strip()): - continue - - for a in section.css("a.uefa-card"): - if not (href := a.attributes.get("href")): - continue - - link = urljoin(url, href) - - team_spans = [t.text(strip=True) for t in a.css("span.uefa-name")] - - if len(team_spans) == 2: - name = f"{team_spans[0]} vs {team_spans[1]}" - - elif len(team_spans) == 1: - name = team_spans[0] - - else: - continue - - if not (time_span := a.css_first(".uefa-time")): - continue - - time_text = time_span.text(strip=True) - - timestamp = int(a.attributes.get("data-time", Time.default_8())) - - key = 
f"[{sport}] {name} ({TAG})" - - if cached_keys & {key}: - continue - - event_dt = Time.from_ts(timestamp) - - if time_text == "LIVE" or (start_dt <= event_dt <= end_dt): - events.append( - { - "sport": sport, - "event": name, - "link": link, - "timestamp": timestamp, - } - ) - - return events - - -async def scrape(client: httpx.AsyncClient) -> None: - cached_urls = CACHE_FILE.load() - cached_count = len(cached_urls) - urls.update(cached_urls) - - log.info(f"Loaded {cached_count} event(s) from cache") - - if not (base_url := await network.get_base(MIRRORS)): - log.warning("No working Streameast mirrors") - CACHE_FILE.write(cached_urls) - return - - log.info(f'Scraping from "{base_url}"') - - events = await get_events( - client, - base_url, - set(cached_urls.keys()), - ) - - log.info(f"Processing {len(events)} new URL(s)") - - if events: - async with async_playwright() as p: - browser, context = await network.browser(p, browser="brave") - - for i, ev in enumerate(events, start=1): - handler = partial( - network.process_event, - url=ev["link"], - url_num=i, - context=context, - log=log, - ) - - url = await network.safe_process( - handler, - url_num=i, - log=log, - ) - - if url: - sport, event, ts = ev["sport"], ev["event"], ev["timestamp"] - - tvg_id, logo = leagues.get_tvg_info(sport, event) - - key = f"[{sport}] {event} ({TAG})" - - entry = { - "url": url, - "logo": logo, - "base": "https://embedsports.top/", - "timestamp": ts, - "id": tvg_id or "Live.Event.us", - } - - urls[key] = cached_urls[key] = entry - - await browser.close() - - if new_count := len(cached_urls) - cached_count: - log.info(f"Collected and cached {new_count} new event(s)") - else: - log.info("No new events found") - - CACHE_FILE.write(cached_urls) +from functools import partial +from urllib.parse import urljoin + +import httpx +from playwright.async_api import async_playwright +from selectolax.parser import HTMLParser + +from .utils import Cache, Time, get_logger, leagues, network + +log = get_logger(__name__) + +urls: dict[str, dict[str, str | float]] = {} + +CACHE_FILE = Cache("streameast.json", exp=10_800) + +prefixes = { + "ga": None, + "ph": None, + "sg": None, + "ch": None, + "ec": None, + "fi": None, + "ms": None, + "ps": None, + "cf": None, + "sk": None, + "co": "the", + "fun": "the", + "ru": "the", + "su": "the", +} + +MIRRORS = [ + *[f"https://streameast.{ext}" for ext in prefixes if not prefixes[ext]], + *[f"https://thestreameast.{ext}" for ext in prefixes if prefixes[ext] == "the"], +] + +TAG = "STRMEST" + + +async def get_events( + client: httpx.AsyncClient, + url: str, + cached_keys: set[str], +) -> list[dict[str, str]]: + + try: + r = await client.get(url) + r.raise_for_status() + except Exception as e: + log.error(f'Failed to fetch "{url}": {e}') + + return [] + + soup = HTMLParser(r.content) + + events = [] + + now = Time.clean(Time.now()) + start_dt = now.delta(minutes=-30) + end_dt = now.delta(minutes=30) + + for section in soup.css("div.se-sport-section"): + if not (sport := section.attributes.get("data-sport-name", "").strip()): + continue + + for a in section.css("a.uefa-card"): + if not (href := a.attributes.get("href")): + continue + + link = urljoin(url, href) + + team_spans = [t.text(strip=True) for t in a.css("span.uefa-name")] + + if len(team_spans) == 2: + name = f"{team_spans[0]} vs {team_spans[1]}" + + elif len(team_spans) == 1: + name = team_spans[0] + + else: + continue + + if not (time_span := a.css_first(".uefa-time")): + continue + + time_text = time_span.text(strip=True) + + 
timestamp = int(a.attributes.get("data-time", Time.default_8()))
+
+            key = f"[{sport}] {name} ({TAG})"
+
+            if cached_keys & {key}:
+                continue
+
+            event_dt = Time.from_ts(timestamp)
+
+            if time_text == "LIVE" or (start_dt <= event_dt <= end_dt):
+                events.append(
+                    {
+                        "sport": sport,
+                        "event": name,
+                        "link": link,
+                        "timestamp": timestamp,
+                    }
+                )
+
+    return events
+
+
+async def scrape(client: httpx.AsyncClient) -> None:
+    cached_urls = CACHE_FILE.load()
+    cached_count = len(cached_urls)
+    urls.update(cached_urls)
+
+    log.info(f"Loaded {cached_count} event(s) from cache")
+
+    if not (base_url := await network.get_base(MIRRORS)):
+        log.warning("No working Streameast mirrors")
+        CACHE_FILE.write(cached_urls)
+        return
+
+    log.info(f'Scraping from "{base_url}"')
+
+    events = await get_events(
+        client,
+        base_url,
+        set(cached_urls.keys()),
+    )
+
+    log.info(f"Processing {len(events)} new URL(s)")
+
+    if events:
+        async with async_playwright() as p:
+            browser, context = await network.browser(p, browser="brave")
+
+            for i, ev in enumerate(events, start=1):
+                handler = partial(
+                    network.process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                    log=log,
+                )
+
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )
+
+                if url:
+                    sport, event, ts = ev["sport"], ev["event"], ev["timestamp"]
+
+                    tvg_id, logo = leagues.get_tvg_info(sport, event)
+
+                    key = f"[{sport}] {event} ({TAG})"
+
+                    entry = {
+                        "url": url,
+                        "logo": logo,
+                        "base": "https://embedsports.top/",
+                        "timestamp": ts,
+                        "id": tvg_id or "Live.Event.us",
+                    }
+
+                    urls[key] = cached_urls[key] = entry
+
+            await browser.close()
+
+    if new_count := len(cached_urls) - cached_count:
+        log.info(f"Collected and cached {new_count} new event(s)")
+    else:
+        log.info("No new events found")
+
+    CACHE_FILE.write(cached_urls)
diff --git a/M3U8/scrapers/streamcenter.py b/M3U8/scrapers/streamcenter.py
new file mode 100644
index 0000000..e791c05
--- /dev/null
+++ b/M3U8/scrapers/streamcenter.py
@@ -0,0 +1,174 @@
+from functools import partial
+
+import httpx
+from playwright.async_api import async_playwright
+
+from .utils import Cache, Time, get_logger, leagues, network
+
+log = get_logger(__name__)
+
+urls: dict[str, dict[str, str | float]] = {}
+
+CACHE_FILE = Cache("streamcenter.json", exp=10_800)
+
+API_FILE = Cache("streamcenter-api.json", exp=28_800)
+
+BASE_URL = "https://backendstreamcenter.youshop.pro:488/api/Parties"
+
+TAG = "STRMCNTR"
+
+categories = {
+    4: "Basketball",
+    9: "Football",
+    13: "Baseball",
+    14: "American Football",
+    15: "Motor Sport",
+    16: "Hockey",
+    17: "Fight MMA",
+    18: "Boxing",
+    19: "NCAA Sports",
+    20: "WWE",
+    21: "Tennis",
+}
+
+
+async def refresh_api_cache(
+    client: httpx.AsyncClient,
+    url: str,
+    now_ts: float,
+) -> list[dict[str, str | int]]:
+    log.info("Refreshing API cache")
+
+    try:
+        r = await client.get(url, params={"pageNumber": 1, "pageSize": 500})
+        r.raise_for_status()
+    except Exception as e:
+        log.error(f'Failed to fetch "{url}": {e}')
+
+        return []
+
+    data = r.json()
+
+    data[-1]["timestamp"] = now_ts
+
+    return data
+
+
+async def get_events(
+    client: httpx.AsyncClient,
+    cached_keys: set[str],
+) -> list[dict[str, str | float]]:
+    now = Time.clean(Time.now())
+
+    if not (api_data := API_FILE.load(per_entry=False, index=-1)):
+        api_data = await refresh_api_cache(
+            client,
+            BASE_URL,
+            now.timestamp(),
+        )
+
+        API_FILE.write(api_data)
+
+    events = []
+
+    start_dt = now.delta(minutes=-30)
+    end_dt = now.delta(minutes=30)
+
+    for stream_group in api_data:
+        category_id: int 
= stream_group.get("categoryId") + + name: str = stream_group.get("gameName") + + iframe: str = stream_group.get("videoUrl") + + event_time: str = stream_group.get("beginPartie") + + if not (name and category_id and iframe and event_time): + continue + + event_dt = Time.from_str(event_time, timezone="UTC") + + if not start_dt <= event_dt <= end_dt: + continue + + if not (sport := categories.get(category_id)): + continue + + key = f"[{sport}] {name} ({TAG})" + + if cached_keys & {key}: + continue + + events.append( + { + "sport": sport, + "event": name, + "link": iframe.split("<")[0], + "timestamp": event_dt.timestamp(), + } + ) + + return events + + +async def scrape(client: httpx.AsyncClient) -> None: + cached_urls = CACHE_FILE.load() + cached_count = len(cached_urls) + urls.update(cached_urls) + + log.info(f"Loaded {cached_count} event(s) from cache") + + log.info(f'Scraping from "{BASE_URL}"') + + events = await get_events(client, set(cached_urls.keys())) + + log.info(f"Processing {len(events)} new URL(s)") + + if events: + async with async_playwright() as p: + browser, context = await network.browser(p) + + for i, ev in enumerate(events, start=1): + handler = partial( + network.process_event, + url=ev["link"], + url_num=i, + context=context, + log=log, + ) + + url = await network.safe_process( + handler, + url_num=i, + log=log, + ) + + if url: + sport, event, ts = ( + ev["sport"], + ev["event"], + ev["timestamp"], + ) + + key = f"[{sport}] {event} ({TAG})" + + tvg_id, logo = leagues.get_tvg_info(sport, event) + + entry = { + "url": url, + "logo": logo, + "base": "https://streamcenter.xyz", + "timestamp": ts, + "id": tvg_id or "Live.Event.us", + } + + urls[key] = cached_urls[key] = entry + + await browser.close() + + if new_count := len(cached_urls) - cached_count: + log.info(f"Collected and cached {new_count} new event(s)") + else: + log.info("No new events found") + + CACHE_FILE.write(cached_urls)
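
For a quick local check of the new module, a minimal driver along these lines should work; it is only a sketch and not part of the patch. It assumes the scrapers package is importable from the M3U8 directory and that a plain httpx.AsyncClient is an acceptable stand-in for the shared network.client that fetch.py passes in.

import asyncio

import httpx

from scrapers import streamcenter


async def main() -> None:
    # scrape() fills the module-level urls mapping, keyed like
    # "[<sport>] <event> (STRMCNTR)", exactly as fetch.py consumes it.
    async with httpx.AsyncClient(timeout=30) as client:
        await streamcenter.scrape(client)

    for key, entry in streamcenter.urls.items():
        print(key, entry["url"])


if __name__ == "__main__":
    asyncio.run(main())

As with fetch.py, Playwright and a usable browser must be installed for network.browser() to start.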