# iptv/M3U8/scrapers/fawa.py
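"""Scraper for fawanews.sc live-event pages.

Collects scheduled events from the site's front page, resolves each event page
to a direct .m3u8 stream URL, and caches the results between runs.
"""
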
import re
from functools import partial
from urllib.parse import quote, urljoin

from selectolax.parser import HTMLParser

from .utils import Cache, Time, get_logger, leagues, network

log = get_logger(__name__)

# Collected playlist entries, keyed as "[Sport] Event (FAWA)".
urls: dict[str, dict[str, str | float]] = {}

TAG = "FAWA"

CACHE_FILE = Cache(f"{TAG.lower()}.json", exp=10_800)

BASE_URL = "http://www.fawanews.sc/"


async def process_event(url: str, url_num: int) -> str | None:
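    """Fetch an event page and return the first .m3u8 URL found in its markup."""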
    if not (html_data := await network.request(url, log=log)):
        log.info(f"URL {url_num}) Failed to load URL")
        return None
    # Match a JS assignment such as: var source = ["https://host/path/stream.m3u8?key=..."]
    valid_m3u8 = re.compile(
        r'var\s+(\w+)\s*=\s*\[["\']?(https?:\/\/[^"\'\s>]+\.m3u8(?:\?[^"\'\s>]*)?)["\']\]?',
        re.IGNORECASE,
    )
    if not (match := valid_m3u8.search(html_data.text)):
        log.info(f"URL {url_num}) No M3U8 found")
        return None
    log.info(f"URL {url_num}) Captured M3U8")
    return match[2]


async def get_events(cached_hrefs: set[str]) -> list[dict[str, str]]:
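    """Scrape the front page for scheduled events whose hrefs are not already cached."""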
    events = []

    if not (html_data := await network.request(BASE_URL, log=log)):
        return events

    soup = HTMLParser(html_data.content)

    valid_event = re.compile(r"\d{1,2}:\d{1,2}")  # "HH:MM" start time
    clean_event = re.compile(r"\s+-+\s+\w{1,4}")  # trailing " - XXX" suffix on names
    for item in soup.css(".user-item"):
        text = item.css_first(".user-item__name")
        subtext = item.css_first(".user-item__playing")
        link = item.css_first("a[href]")
        if not (link and (href := link.attributes.get("href"))):
            continue
        href = quote(href)
        if href in cached_hrefs:
            continue
        if not (text and subtext):
            continue
        event_name, details = text.text(strip=True), subtext.text(strip=True)
        if not valid_event.search(details):
            continue
        # The text before the start time is the sport/league name.
        sport = valid_event.split(details)[0].strip()
        events.append(
            {
                "sport": sport,
                "event": clean_event.sub("", event_name),
                "link": urljoin(BASE_URL, href),
                "href": href,
            }
        )
    return events


async def scrape() -> None:
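    """Merge cached events with freshly scraped ones and persist the result."""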
    cached_urls = CACHE_FILE.load()
    cached_hrefs = {entry["href"] for entry in cached_urls.values()}
    cached_count = len(cached_urls)
    urls.update(cached_urls)
    log.info(f"Loaded {cached_count} event(s) from cache")
    log.info(f'Scraping from "{BASE_URL}"')

    events = await get_events(cached_hrefs)

    log.info(f"Processing {len(events)} new URL(s)")
    if events:
        now = Time.clean(Time.now()).timestamp()
        for i, ev in enumerate(events, start=1):
            handler = partial(
                process_event,
                url=ev["link"],
                url_num=i,
            )
            url = await network.safe_process(
                handler,
                url_num=i,
                log=log,
                timeout=10,
            )
            if url:
                sport, event, link = (
                    ev["sport"],
                    ev["event"],
                    ev["link"],
                )

                key = f"[{sport}] {event} ({TAG})"
                tvg_id, logo = leagues.get_tvg_info(sport, event)
                entry = {
                    "url": url,
                    "logo": logo,
                    "base": BASE_URL,
                    "timestamp": now,
                    "id": tvg_id or "Live.Event.us",
                    "href": ev["href"],
                    "link": link,
                }
                # Record the entry both in memory and in the cache payload.
                urls[key] = cached_urls[key] = entry
    if new_count := len(cached_urls) - cached_count:
        log.info(f"Collected and cached {new_count} new event(s)")
    else:
        log.info("No new events found")
    CACHE_FILE.write(cached_urls)
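
# Usage sketch (hypothetical driver): in the project this module is presumably
# invoked by the shared scraper runner, but scrape() can be exercised directly.
# The import path below is assumed from the file's location.
#
#     import asyncio
#     from M3U8.scrapers import fawa
#
#     asyncio.run(fawa.scrape())
#     print(fawa.urls)  # "[Sport] Event (FAWA)" -> entries carrying the .m3u8 URLs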