doms9 2025-10-29 03:21:18 -04:00
parent 8609c0a39e
commit 00000d90b7
11 changed files with 501 additions and 187 deletions

View file

@@ -5,6 +5,7 @@ from pathlib import Path
 from scrapers import (
     fstv,
     ppv,
+    roxie,
     streambtw,
     streameast,
     streamed,
@@ -40,6 +41,7 @@ async def main() -> None:
     tasks = [
         asyncio.create_task(fstv.scrape(network.client)),
         asyncio.create_task(ppv.scrape(network.client)),
+        asyncio.create_task(roxie.scrape(network.client)),
         asyncio.create_task(streambtw.scrape(network.client)),
         asyncio.create_task(streameast.scrape(network.client)),
         asyncio.create_task(streamed.scrape(network.client)),

@@ -54,6 +56,7 @@ async def main() -> None:
     additions = (
         fstv.urls
         | ppv.urls
+        | roxie.urls
         | streambtw.urls
         | streameast.urls
         | streamed.urls
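
The new scraper plugs into main() exactly like the existing ones: one asyncio task per module, and each module's urls dict merged into additions with the | union operator (PEP 584). With dict union the right-hand operand wins on duplicate keys; the per-source suffixes such as "(FSTV)" and "(ROXIE)" keep keys distinct in practice. A tiny illustration with made-up entries:

fstv_urls = {"[NBA] Lakers vs Celtics (FSTV)": {"url": "https://example.com/a.m3u8"}}
roxie_urls = {"[NBA] Lakers vs Celtics (ROXIE)": {"url": "https://example.com/b.m3u8"}}

additions = fstv_urls | roxie_urls  # right-hand side would win on an identical key
print(len(additions))  # 2 -- the source tags keep the keys separate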

View file

@@ -134,18 +134,30 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         now = Time.now().timestamp()

         for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, client=client, url=ev["link"], url_num=i)
+            handler = partial(
+                process_event,
+                client=client,
+                url=ev["link"],
+                url_num=i,
+            )

-            match_name, url = await network.safe_process(handler, url_num=i, log=log)
+            match_name, url = await network.safe_process(
+                handler,
+                url_num=i,
+                log=log,
+            )

             if url:
                 sport = ev["sport"]

                 key = (
-                    f"[{sport}] {match_name} (FSTV)" if match_name else f"[{sport}] (FSTV)"
+                    f"[{sport}] {match_name} (FSTV)"
+                    if match_name
+                    else f"[{sport}] (FSTV)"
                 )

                 tvg_id, logo = leagues.info(sport)
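
Most hunks in this commit re-wrap the same call shape: build an argument-free coroutine factory with functools.partial and hand it to network.safe_process together with the URL index and logger. safe_process itself is not part of this diff; the sketch below is only an assumption about what a helper with that signature might do (await the handler, log failures, return None so one bad event URL cannot abort the loop):

import logging
from typing import Any, Awaitable, Callable


async def safe_process(
    handler: Callable[[], Awaitable[Any]],
    url_num: int,
    log: logging.Logger,
) -> Any:
    # Hypothetical stand-in for network.safe_process: run the prepared
    # handler and convert any exception into a warning plus a None result.
    try:
        return await handler()
    except Exception as e:
        log.warning(f"URL {url_num}) failed: {e}")
        return None

Keeping the per-scraper arguments (client, url, context) inside the partial is what lets every scraper share a single error-handling path.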

View file

@@ -180,13 +180,23 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         async with async_playwright() as p:
             browser, context = await network.browser(p)

             for i, ev in enumerate(events, start=1):
-                handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )

-                url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )

                 if url:
                     sport, event, logo, ts = (
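
network.browser(p) (and the browser="brave" variants used elsewhere in this commit) is also outside this diff. A minimal sketch of a helper with that return shape, using Playwright's async API and assuming a plain headless Chromium launch:

from playwright.async_api import Browser, BrowserContext, Playwright


async def browser(p: Playwright) -> tuple[Browser, BrowserContext]:
    # Assumed behaviour: launch a browser and hand back a fresh context with
    # it; callers close the browser once the scrape loop finishes.
    b = await p.chromium.launch(headless=True)
    ctx = await b.new_context()
    return b, ctx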

M3U8/scrapers/roxie.py (new file, 226 lines)

View file

@@ -0,0 +1,226 @@
import asyncio
from functools import partial
from pathlib import Path
from urllib.parse import urljoin

import httpx
from playwright.async_api import BrowserContext, async_playwright
from selectolax.parser import HTMLParser

from .utils import Cache, Time, get_logger, leagues, network

log = get_logger(__name__)

urls: dict[str, dict[str, str]] = {}

BASE_URL = "https://roxiestreams.cc"

SPORT_URLS = {
    sport: urljoin(BASE_URL, sport.lower())
    for sport in ["Soccer", "MLB", "NBA", "NFL", "Fighting", "Motorsports"]
}

CACHE_FILE = Cache(Path(__file__).parent / "caches" / "roxie.json", exp=10_800)
HTML_CACHE = Cache(Path(__file__).parent / "caches" / "roxie_html.json", exp=86_400)


async def process_event(
    url: str,
    url_num: int,
    context: BrowserContext,
) -> str | None:
    page = await context.new_page()

    captured: list[str] = []
    got_one = asyncio.Event()

    handler = partial(network.capture_req, captured=captured, got_one=got_one)
    page.on("request", handler)

    try:
        await page.goto(
            url,
            wait_until="domcontentloaded",
            timeout=15_000,
        )

        wait_task = asyncio.create_task(got_one.wait())

        try:
            await asyncio.wait_for(wait_task, timeout=6)
        except asyncio.TimeoutError:
            log.warning(f"URL {url_num}) Timed out waiting for M3U8.")
            return
        finally:
            if not wait_task.done():
                wait_task.cancel()
                try:
                    await wait_task
                except asyncio.CancelledError:
                    pass

        if captured:
            log.info(f"URL {url_num}) Captured M3U8")
            return captured[-1]

        log.warning(f"URL {url_num}) No M3U8 captured after waiting.")
        return

    except Exception as e:
        log.warning(f"URL {url_num}) Exception while processing: {e}")
        return

    finally:
        page.remove_listener("request", handler)
        await page.close()


async def refresh_html_cache(
    client: httpx.AsyncClient,
    url: str,
    sport: str,
) -> dict[str, str | float]:
    try:
        r = await client.get(url)
        r.raise_for_status()
    except Exception as e:
        log.error(f'Failed to fetch "{url}": {e}')
        return {}

    soup = HTMLParser(r.text)

    events = {}

    for row in soup.css("table#eventsTable tbody tr"):
        a_tag = row.css_first("td a")
        if not a_tag:
            continue

        event = a_tag.text(strip=True)
        event_link = a_tag.attributes.get("href")

        if not (span := row.css_first("span.countdown-timer")):
            continue

        data_start = span.attributes["data-start"]
        event_dt = Time.from_str(f"{data_start} PST", "%B %d, %Y %H:%M:%S")

        key = f"[{sport}] {event} (ROXIE)"

        events[key] = {
            "sport": sport,
            "event": event,
            "link": event_link,
            "event_ts": event_dt.timestamp(),
            "timestamp": Time.now().timestamp(),
        }

    return events


async def get_events(
    client: httpx.AsyncClient,
    sport_urls: dict[str, str],
    cached_keys: set[str],
) -> list[dict[str, str]]:
    if not (events := HTML_CACHE.load()):
        tasks = [
            refresh_html_cache(client, url, sport) for sport, url in sport_urls.items()
        ]
        results = await asyncio.gather(*tasks)

        events = {k: v for data in results for k, v in data.items()}
        HTML_CACHE.write(events)

    live = []

    now = Time.clean(Time.now())
    start_ts = now.delta(minutes=-30).timestamp()
    end_ts = now.delta(minutes=30).timestamp()

    for k, v in events.items():
        if cached_keys & {k}:
            continue

        if not start_ts <= v["event_ts"] <= end_ts:
            continue

        live.append({**v})

    return live


async def scrape(client: httpx.AsyncClient) -> None:
    cached_urls = CACHE_FILE.load()
    cached_count = len(cached_urls)

    urls.update(cached_urls)
    log.info(f"Loaded {cached_count} event(s) from cache")
    log.info(f'Scraping from "{BASE_URL}"')

    events = await get_events(
        client,
        SPORT_URLS,
        set(cached_urls.keys()),
    )

    log.info(f"Processing {len(events)} new URL(s)")

    if events:
        async with async_playwright() as p:
            browser, context = await network.browser(p)

            for i, ev in enumerate(events, start=1):
                handler = partial(
                    process_event,
                    url=ev["link"],
                    url_num=i,
                    context=context,
                )

                url = await network.safe_process(
                    handler,
                    url_num=i,
                    log=log,
                )

                if url:
                    sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]
                    tvg_id, logo = leagues.info(sport)

                    key = f"[{sport}] {event} (ROXIE)"

                    entry = {
                        "url": url,
                        "logo": logo,
                        "base": "",
                        "timestamp": ts,
                        "id": tvg_id or "Live.Event.us",
                    }

                    urls[key] = cached_urls[key] = entry

            await browser.close()

    if new_count := len(cached_urls) - cached_count:
        log.info(f"Collected and cached {new_count} new event(s)")
    else:
        log.info("No new events found")

    CACHE_FILE.write(cached_urls)
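
process_event above leans on network.capture_req, which is not shown in this commit; from the way it is wired (a captured list plus an asyncio.Event), it presumably records requests that look like HLS playlists and signals the waiter. A sketch of a callback with that shape, as an assumption only:

import asyncio

from playwright.async_api import Request


def capture_req(
    request: Request,
    *,
    captured: list[str],
    got_one: asyncio.Event,
) -> None:
    # Hypothetical stand-in: remember any playlist-looking request URL and
    # wake up the asyncio.wait_for() in process_event.
    if ".m3u8" in request.url:
        captured.append(request.url)
        got_one.set()

Note also that get_events only forwards events whose data-start timestamp falls within ±30 minutes of the current time and whose key is not already cached, so Playwright is launched only for events that are about to start or have just started.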

View file

@@ -88,10 +88,16 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         now = Time.now().timestamp()

         for i, ev in enumerate(events, start=1):
-            handler = partial(process_event, client=client, url=ev["link"], url_num=i)
+            handler = partial(
+                process_event,
+                client=client,
+                url=ev["link"],
+                url_num=i,
+            )

             url = await network.safe_process(
                 handler,
View file

@@ -174,13 +174,23 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         async with async_playwright() as p:
             browser, context = await network.browser(p, browser="brave")

             for i, ev in enumerate(events, start=1):
-                handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )

-                url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )

                 if url:
                     sport, event, ts = ev["sport"], ev["event"], ev["timestamp"]

View file

@@ -72,7 +72,9 @@ async def process_event(
         await page.close()

-async def refresh_html_cache(client: httpx.AsyncClient, url: str) -> dict[str, str]:
+async def refresh_html_cache(
+    client: httpx.AsyncClient, url: str
+) -> dict[str, str | float]:
     try:
         r = await client.get(url)
         r.raise_for_status()

@@ -172,13 +174,23 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         async with async_playwright() as p:
             browser, context = await network.browser(p, browser="brave")

             for i, ev in enumerate(events, start=1):
-                handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )

-                url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )

                 if url:
                     sport, event, ts = ev["sport"], ev["event"], ev["event_ts"]

View file

@@ -206,13 +206,23 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         async with async_playwright() as p:
             browser, context = await network.browser(p, "brave")

             for i, ev in enumerate(events, start=1):
-                handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )

-                url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )

                 if url:
                     sport, event, logo, ts = (

View file

@@ -166,13 +166,23 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         async with async_playwright() as p:
             browser, context = await network.browser(p)

             for i, ev in enumerate(events, start=1):
-                handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )

-                url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )

                 if url:
                     sport, event, logo, ts = (

View file

@@ -5,12 +5,16 @@ from pathlib import Path
 import pytz

-ZONES = {"ET": pytz.timezone("America/New_York"), "UTC": timezone.utc}
-ZONES["EDT"] = ZONES["EST"] = ZONES["ET"]

 class Time(datetime):
+    ZONES = {
+        "ET": pytz.timezone("America/New_York"),
+        "PST": pytz.timezone("America/Los_Angeles"),
+        "UTC": timezone.utc,
+    }
+    ZONES["EDT"] = ZONES["EST"] = ZONES["ET"]
+
     TZ = ZONES["ET"]

     @classmethod

@@ -39,8 +43,8 @@
         )

     def to_tz(self, tzone: str) -> "Time":
-        dt = self.astimezone(ZONES[tzone])
-        return self.__class__.fromtimestamp(dt.timestamp(), tz=ZONES[tzone])
+        dt = self.astimezone(self.ZONES[tzone])
+        return self.__class__.fromtimestamp(dt.timestamp(), tz=self.ZONES[tzone])

     @classmethod
     def from_str(

@@ -49,16 +53,17 @@
         fmt: str | None = None,
     ) -> "Time":
-        pattern = re.compile(r"\b(ET|UTC|EST|EDT)\b")
+        pattern = re.compile(r"\b(ET|UTC|EST|EDT|PST)\b")
         match = pattern.search(s)
-        tz = ZONES.get(match[1]) if match else cls.TZ
+        tz = cls.ZONES.get(match[1]) if match else cls.TZ
         cleaned_str = pattern.sub("", s).strip()
         if fmt:
             dt = datetime.strptime(cleaned_str, fmt)
         else:
             formats = [
                 "%Y-%m-%d %H:%M",

View file

@@ -224,13 +224,23 @@ async def scrape(client: httpx.AsyncClient) -> None:
     log.info(f"Processing {len(events)} new URL(s)")

     if events:
         async with async_playwright() as p:
             browser, context = await network.browser(p)

             for i, ev in enumerate(events, start=1):
-                handler = partial(process_event, url=ev["link"], url_num=i, context=context)
+                handler = partial(
+                    process_event,
+                    url=ev["link"],
+                    url_num=i,
+                    context=context,
+                )

-                url = await network.safe_process(handler, url_num=i, log=log)
+                url = await network.safe_process(
+                    handler,
+                    url_num=i,
+                    log=log,
+                )

                 sport, event, logo, ts = (
                     ev["sport"],