iptv/M3U8/scrapers/streamfree.py
2025-12-02 15:26:13 -05:00

76 lines
1.8 KiB
Python

from urllib.parse import urljoin
import httpx
from .utils import Cache, Time, get_logger, leagues
log = get_logger(__name__)
# In-memory registry of scraped events: display key -> playlist metadata dict.
urls: dict[str, dict[str, str | float]] = {}
# On-disk cache of the registry; entries expire after 19 800 s (5.5 h).
CACHE_FILE = Cache("streamfree.json", exp=19_800)
BASE_URL = "https://streamfree.to"
# Suffix appended to each event's display name to identify this scraper.
TAG = "STRMFR"
async def refresh_api_cache(
    client: httpx.AsyncClient,
    url: str,
) -> dict[str, dict[str, list]]:
    """Fetch the streams API payload from *url* and return the parsed JSON.

    Best-effort: returns an empty dict when the HTTP request fails or the
    server responds with a non-2xx status. A JSON decode error on a
    successful response still propagates to the caller.
    """
    try:
        r = await client.get(url)
        r.raise_for_status()
    except httpx.HTTPError as e:
        # Narrowed from bare `Exception`: httpx.HTTPError is the common base
        # of RequestError (transport failures) and HTTPStatusError (raised by
        # raise_for_status), so genuine bugs are no longer swallowed here.
        log.error(f'Failed to fetch "{url}": {e}')
        return {}
    return r.json()
async def get_events(client: httpx.AsyncClient) -> dict[str, dict[str, str | float]]:
    """Scrape the streams API and build the event mapping.

    Returns a dict keyed by the display string "[league] name (TAG)", each
    value holding the playlist URL, logo, base URL, scrape timestamp, and
    tvg id (falling back to "Live.Event.us" when none is known).
    """
    api_data = await refresh_api_cache(client, urljoin(BASE_URL, "streams"))
    collected: dict[str, dict[str, str | float]] = {}
    for stream_list in api_data.get("streams", {}).values():
        # `or ()` skips empty/None stream lists without a separate guard.
        for entry in stream_list or ():
            league = entry["league"]
            title = entry["name"]
            stream_key = entry["stream_key"]
            tvg_id, logo = leagues.get_tvg_info(league, title)
            collected[f"[{league}] {title} ({TAG})"] = {
                "url": urljoin(BASE_URL, f"live/{stream_key}720p/index.m3u8"),
                "logo": logo,
                "base": BASE_URL,
                "timestamp": Time.now().timestamp(),
                "id": tvg_id or "Live.Event.us",
            }
    return collected
async def scrape(client: httpx.AsyncClient) -> None:
    """Populate the module-level ``urls`` registry.

    Loads events from the on-disk cache when present; otherwise scrapes the
    site, stores the results in ``urls``, and writes them back to the cache.
    """
    cached = CACHE_FILE.load()
    if cached:
        urls.update(cached)
        log.info(f"Loaded {len(urls)} event(s) from cache")
        return
    log.info(f'Scraping from "{BASE_URL}"')
    urls.update(await get_events(client))
    CACHE_FILE.write(urls)
    log.info(f"Collected and cached {len(urls)} new event(s)")