justabranch/M3U8/scrapers/streambtw.py

import base64
import json
import re
from functools import partial

from selectolax.parser import HTMLParser

from .utils import Cache, Time, get_logger, leagues, network

log = get_logger(__name__)

urls: dict[str, dict[str, str | float]] = {}
TAG = "STRMBTW"
CACHE_FILE = Cache(TAG, exp=3_600)

BASE_URL = "https://hiteasport.info"


def fix_league(s: str) -> str:
    pattern = re.compile(r"^\w*-\w*", re.IGNORECASE)
    return " ".join(s.split("-")) if pattern.search(s) else s


async def process_event(url: str, url_num: int) -> str | None:
    if not (html_data := await network.request(url, log=log)):
        return None
    # The event page embeds the stream as the first `var name = "value"`
    # assignment; the value is either a plain M3U8 URL or base64-encoded.
    valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)
    if not (match := valid_m3u8.search(html_data.text)):
        log.info(f"URL {url_num}) No M3U8 found")
        return None
    stream_link: str = match[2]
    if not stream_link.startswith("http"):
        stream_link = base64.b64decode(stream_link).decode("utf-8")
    log.info(f"URL {url_num}) Captured M3U8")
    return stream_link
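
# Hypothetical page snippet process_event() would match (real markup may differ):
#   <script>var player = "aHR0cHM6Ly9leGFtcGxlLmNvbS9saXZlLm0zdTg=";</script>
# where the captured value base64-decodes to "https://example.com/live.m3u8".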


async def get_events() -> list[dict[str, str]]:
    events = []
    if not (html_data := await network.request(BASE_URL, log=log)):
        return events
    soup = HTMLParser(html_data.content)
    # Find the inline <script> that defines the JS `const DATA` array.
    script_text = None
    for s in soup.css("script"):
        t = s.text() or ""
        if "const DATA" in t:
            script_text = t
            break
    if not script_text:
        return events
    if not (
        match := re.search(r"const\s+DATA\s*=\s*(\[\s*.*?\s*\]);", script_text, re.S)
    ):
        return events
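    # DATA is a JS array literal with unquoted keys, so it is not valid JSON
    # as-is. The substitutions below presumably expect a shape like this
    # (hypothetical, inferred from the patterns; the real page may differ):
    #   const DATA = [
    #       { title:"NBA",
    #         items:[
    #           { title:"Team A vs Team B", url:"https://.../watch" },
    #         ] },
    #   ];
    # and quote the keys / drop trailing commas until json.loads() accepts it.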
    # Strip the newline-plus-indent runs inside the literal.
    data_js = match[1].replace("\n        ", "").replace("\n    ", "")
    s1 = re.sub(r"{\s", '{"', data_js)  # `{ title` -> `{"title`
    s2 = re.sub(r':"', '":"', s1)  # `title:"` -> `title":"`
    s3 = re.sub(r":\[", '":[', s2)  # `items:[` -> `items":[`
    s4 = re.sub(r"},\]", "}]", s3)  # drop the trailing comma before a `]`
    s5 = re.sub(r'",\s', '","', s4)  # `", key` -> `","key`
    data: list[dict[str, str]] = json.loads(s5)
    for matches in data:
        league = matches["title"]
        items: list[dict[str, str]] = matches["items"]
        for info in items:
            title = info["title"]
            url = info["url"]
            events.append(
                {
                    "sport": fix_league(league),
                    "event": title,
                    "link": url,
                }
            )
    return events
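
# Each dict returned by get_events() has the shape (values hypothetical):
#   {"sport": "nba basketball", "event": "Team A vs Team B", "link": "https://.../watch"}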


async def scrape() -> None:
    if cached := CACHE_FILE.load():
        urls.update(cached)
        log.info(f"Loaded {len(urls)} event(s) from cache")
        return
    log.info(f'Scraping from "{BASE_URL}"')
    events = await get_events()
    log.info(f"Processing {len(events)} new URL(s)")
    if events:
        now = Time.clean(Time.now())
        for i, ev in enumerate(events, start=1):
            handler = partial(
                process_event,
                url=ev["link"],
                url_num=i,
            )
            url = await network.safe_process(
                handler,
                url_num=i,
                semaphore=network.HTTP_S,
                log=log,
            )
            if url:
                sport, event, link = (
                    ev["sport"],
                    ev["event"],
                    ev["link"],
                )
                key = f"[{sport}] {event} ({TAG})"
                tvg_id, logo = leagues.get_tvg_info(sport, event)
                entry = {
                    "url": url,
                    "logo": logo,
                    "base": link,
                    "timestamp": now.timestamp(),
                    "id": tvg_id or "Live.Event.us",
                    "link": link,
                }
                urls[key] = entry
        log.info(f"Collected {len(urls)} event(s)")
        CACHE_FILE.write(urls)
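
# Minimal usage sketch (hypothetical driver; this module uses a relative
# import, so it is presumably run via the package's own entry point):
#   import asyncio
#   from M3U8.scrapers import streambtw
#   asyncio.run(streambtw.scrape())
#   print(len(streambtw.urls), "event(s) collected")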