iptv/M3U8/scrapers/tvapp.py

from functools import partial
from urllib.parse import urljoin, urlparse

from playwright.async_api import Browser
from selectolax.parser import HTMLParser

from .utils import Cache, Time, get_logger, leagues, network

log = get_logger(__name__)
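# In-memory registry of scraped stream entries, keyed as "[<sport>] <event> (TVAPP)".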
urls: dict[str, dict[str, str | float]] = {}
TAG = "TVAPP"

CACHE_FILE = Cache(TAG, exp=86_400)

BASE_URL = "https://thetvapp.to"


def fix_url(s: str) -> str:
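    """Point the stream URL at its plain-HTTP "origin." host and strip the "tracks-v1a1/" path segment."""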
    parsed = urlparse(s)
    base = f"origin.{parsed.netloc.split('.', 1)[-1]}"

    return urljoin(f"http://{base}", parsed.path.replace("tracks-v1a1/", ""))


async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
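    """Scrape the homepage sport sections (skipping "Live TV Channels") and
    return sport/event/link dicts for listings not already in cached_keys."""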
    events = []

    if not (html_data := await network.request(BASE_URL, log=log)):
        return events

    soup = HTMLParser(html_data.content)

    for row in soup.css(".row"):
        if not (h3_elem := row.css_first("h3")):
            continue

        sport = h3_elem.text(strip=True)

        if sport.lower() == "live tv channels":
            continue

        for a in row.css("a.list-group-item[href]"):
            event_name = a.text(strip=True).split(":", 1)[0]

            if f"[{sport}] {event_name} ({TAG})" in cached_keys:
                continue

            if not (href := a.attributes.get("href")):
                continue

            events.append(
                {
                    "sport": sport,
                    "event": event_name,
                    "link": urljoin(BASE_URL, href),
                }
            )

    return events


async def scrape(browser: Browser) -> None:
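    """Load cached entries, resolve stream URLs for newly listed events with
    Playwright, and persist the updated cache."""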
    cached_urls = CACHE_FILE.load()
    cached_count = len(cached_urls)
    urls.update(cached_urls)
    log.info(f"Loaded {cached_count} event(s) from cache")
    log.info(f'Scraping from "{BASE_URL}"')

    events = await get_events(cached_urls.keys())
    log.info(f"Processing {len(events)} new URL(s)")
    if events:
        now = Time.clean(Time.now())
        async with network.event_context(browser) as context:
            for i, ev in enumerate(events, start=1):
                async with network.event_page(context) as page:
                    handler = partial(
                        network.process_event,
                        url=ev["link"],
                        url_num=i,
                        page=page,
                        log=log,
                    )
                    url = await network.safe_process(
                        handler,
                        url_num=i,
                        semaphore=network.PW_S,
                        log=log,
                    )
                    if url:
                        sport, event, link = (
                            ev["sport"],
                            ev["event"],
                            ev["link"],
                        )
                        key = f"[{sport}] {event} ({TAG})"
                        tvg_id, logo = leagues.get_tvg_info(sport, event)
                        entry = {
                            "url": fix_url(url),
                            "logo": logo,
                            "base": BASE_URL,
                            "timestamp": now.timestamp(),
                            "id": tvg_id or "Live.Event.us",
                            "link": link,
                        }
                        urls[key] = cached_urls[key] = entry

    if new_count := len(cached_urls) - cached_count:
        log.info(f"Collected and cached {new_count} new event(s)")
    else:
        log.info("No new events found")

    CACHE_FILE.write(cached_urls)