- streamhub.py: switch scraping from httpx requests with iframe/regex parsing to Playwright navigation and M3U8 request capture
doms9 2026-04-17 13:09:13 -04:00
parent c5b8bcda06
commit 00000d929d
2 changed files with 108 additions and 78 deletions


@@ -65,6 +65,7 @@ async def main() -> None:
         asyncio.create_task(embedhd.scrape(hdl_brwsr)),
         asyncio.create_task(ppv.scrape(xtrnl_brwsr)),
         asyncio.create_task(roxie.scrape(hdl_brwsr)),
+        asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
     ]

     httpx_tasks = [
@@ -75,7 +76,6 @@ async def main() -> None:
         asyncio.create_task(pawa.scrape()),
         asyncio.create_task(shark.scrape()),
         asyncio.create_task(streamcenter.scrape()),
-        asyncio.create_task(streamhub.scrape()),
         asyncio.create_task(streamsgate.scrape()),
         asyncio.create_task(streamtpnew.scrape()),
         asyncio.create_task(totalsportek.scrape()),
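The move above takes streamhub out of the plain-HTTP task group and into the browser-driven group, so its scrape() now receives a shared Playwright browser (xtrnl_brwsr). A minimal, self-contained sketch of that scheduling pattern, assuming main() launches the browser itself; the two scrape coroutines below are placeholders, not the repo's modules:

import asyncio

from playwright.async_api import Browser, async_playwright


async def browser_scrape(browser: Browser) -> None:
    # browser-driven scrapers share one Playwright browser and open their own pages
    page = await browser.new_page()
    await page.goto("https://example.com")
    await page.close()


async def httpx_scrape() -> None:
    # plain-HTTP scrapers take no browser; a sleep stands in for an httpx fetch
    await asyncio.sleep(0)


async def main() -> None:
    async with async_playwright() as pw:
        xtrnl_brwsr = await pw.chromium.launch(headless=True)

        browser_tasks = [asyncio.create_task(browser_scrape(xtrnl_brwsr))]
        httpx_tasks = [asyncio.create_task(httpx_scrape())]

        await asyncio.gather(*browser_tasks, *httpx_tasks)
        await xtrnl_brwsr.close()


asyncio.run(main())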


@@ -1,8 +1,8 @@
 import asyncio
-import re
 from functools import partial
-from urllib.parse import urljoin, urlparse
+from urllib.parse import urljoin
+from playwright.async_api import Browser, Page, TimeoutError

 from selectolax.parser import HTMLParser

 from .utils import Cache, Time, get_logger, leagues, network
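Note that the added import shadows the builtin TimeoutError with Playwright's own exception type; the rewritten code below keeps the two apart by catching Playwright timeouts as TimeoutError and asyncio.wait_for timeouts as asyncio.TimeoutError. A quick illustration of the distinction:

import asyncio

from playwright.async_api import TimeoutError  # Playwright's timeout, shadows the builtin

assert TimeoutError is not asyncio.TimeoutError  # two distinct exception types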
@@ -34,59 +34,86 @@ SPORT_ENDPOINTS = [
 ]


-async def process_event(url: str, url_num: int) -> tuple[str | None, str | None]:
-    if not (event_data := await network.request(url, log=log)):
-        log.warning(f"URL {url_num}) Failed to load url.")
-        return
-
-    soup_1 = HTMLParser(event_data.content)
-    ifr_1 = soup_1.css_first("iframe#playerIframe")
-
-    if not ifr_1 or not (src := ifr_1.attributes.get("src")):
-        log.warning(f"URL {url_num}) No iframe element found.")
-        return
-
-    parsed = urlparse(src)
-    ifr_1_src = urljoin(
-        BASE_URL,
-        f"embed1/{parsed.path.split('/')[-1].split('_')[0]}.php",
-    )
-
-    if not (
-        ifr_1_src_data := await network.request(
-            ifr_1_src,
-            headers={"Referer": url},
-            log=log,
-        )
-    ):
-        log.warning(f"URL {url_num}) Failed to load iframe source. (IFR1)")
-        return
-
-    soup_2 = HTMLParser(ifr_1_src_data.content)
-
-    ifr_2 = soup_2.css_first("center iframe")
-
-    if not ifr_2 or not (ifr_2_src := ifr_2.attributes.get("src")):
-        log.warning(f"URL {url_num}) Unable to locate iframe. (IFR2)")
-        return
-
-    ifr_2_src = f"https:{ifr_2_src}" if ifr_2_src.startswith("//") else ifr_2_src
-
-    if not (ifr_2_src_data := await network.request(ifr_2_src, log=log)):
-        log.warning(f"URL {url_num}) Failed to load iframe source.")
-        return
-
-    valid_m3u8 = re.compile(r"src:\s+(\'|\")([^\']+)(\'|\")", re.I)
-
-    if not (match := valid_m3u8.search(ifr_2_src_data.text)):
-        log.warning(f"URL {url_num}) No source found.")
-        return
-
-    log.info(f"URL {url_num}) Captured M3U8")
-    return match[2]
+async def process_event(
+    url: str,
+    url_num: int,
+    page: Page,
+) -> str | None:
+    captured: list[str] = []
+    got_one = asyncio.Event()
+
+    handler = partial(
+        network.capture_req,
+        captured=captured,
+        got_one=got_one,
+    )
+
+    page.on("request", handler)
+
+    try:
+        resp = await page.goto(
+            url,
+            wait_until="domcontentloaded",
+            timeout=6_000,
+        )
+
+        if not resp or resp.status != 200:
+            log.warning(
+                f"URL {url_num}) Status Code: {resp.status if resp else 'None'}"
+            )
+            return
+
+        try:
+            btn = page.locator("button.btn.btn-sm.btn-success.streamLink")
+            iframe_src = await btn.get_attribute("data-src", timeout=1_250)
+        except TimeoutError:
+            log.warning(f"URL {url_num}) No iframe source found.")
+            return
+
+        await page.goto(
+            iframe_src,
+            wait_until="domcontentloaded",
+            timeout=5_000,
+        )
+
+        wait_task = asyncio.create_task(got_one.wait())
+
+        try:
+            await asyncio.wait_for(wait_task, timeout=10)
+        except asyncio.TimeoutError:
+            log.warning(f"URL {url_num}) Timed out waiting for M3U8.")
+            return
+        finally:
+            if not wait_task.done():
+                wait_task.cancel()
+                try:
+                    await wait_task
+                except asyncio.CancelledError:
+                    pass
+
+        if captured:
+            log.info(f"URL {url_num}) Captured M3U8")
+            return captured[0]
+
+        log.warning(f"URL {url_num}) No M3U8 captured after waiting.")
+        return
+
+    except Exception as e:
+        log.warning(f"URL {url_num}) {e}")
+        return
+
+    finally:
+        page.remove_listener("request", handler)


 async def refresh_html_cache(
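The rewritten process_event relies on a network.capture_req helper from .utils whose implementation is not part of this commit; from the call site it is presumably a Playwright request listener that records matching URLs and signals an event. A hedged sketch of what such a callback could look like (the .m3u8 filter is an assumption, not taken from the repo):

import asyncio

from playwright.async_api import Request


def capture_req(
    request: Request,
    *,
    captured: list[str],
    got_one: asyncio.Event,
) -> None:
    # registered via page.on("request", partial(capture_req, captured=..., got_one=...));
    # Playwright invokes it with the Request object for every outgoing network request
    if ".m3u8" in request.url:  # assumption: only HLS playlist URLs are of interest
        captured.append(request.url)
        got_one.set()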
@@ -185,7 +212,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     return live


-async def scrape() -> None:
+async def scrape(browser: Browser) -> None:
     cached_urls = CACHE_FILE.load()
     valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
@@ -200,13 +227,15 @@
     if events := await get_events(cached_urls.keys()):
         log.info(f"Processing {len(events)} new URL(s)")

-        for i, ev in enumerate(events, start=1):
-            handler = partial(
-                process_event,
-                url=(link := ev["link"]),
-                url_num=i,
-            )
-
-            url = await network.safe_process(
+        async with network.event_context(browser) as context:
+            for i, ev in enumerate(events, start=1):
+                async with network.event_page(context) as page:
+                    handler = partial(
+                        process_event,
+                        url=(link := ev["link"]),
+                        url_num=i,
+                        page=page,
+                    )
+
+                    url = await network.safe_process(
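network.event_context and network.event_page are likewise repo-internal helpers not shown in this diff; from the usage above they presumably open a fresh BrowserContext and Page and guarantee cleanup. A minimal sketch under that assumption:

from contextlib import asynccontextmanager

from playwright.async_api import Browser, BrowserContext


@asynccontextmanager
async def event_context(browser: Browser):
    # one isolated context per scrape run, always closed afterwards
    context = await browser.new_context()
    try:
        yield context
    finally:
        await context.close()


@asynccontextmanager
async def event_page(context: BrowserContext):
    # one fresh page per event, closed even if process_event raises
    page = await context.new_page()
    try:
        yield page
    finally:
        await page.close()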
@@ -233,7 +262,6 @@ async def scrape() -> None:
                "timestamp": ts,
                "id": tvg_id or "Live.Event.us",
                "link": link,
-                "UA": "curl/8.19.0",
            }

            cached_urls[key] = entry
@@ -241,6 +269,8 @@ async def scrape() -> None:
            if url:
                valid_count += 1
+
+                entry["url"] = url.split("?st")[0]

                urls[key] = entry

    log.info(f"Collected and cached {valid_count - cached_count} new event(s)")