- change scrape window for istreameast.py
- harden scraping method for roxie.py
- catch nulls for ppv.py
- change scraping method for totalsportek.py
- misc edits.
doms9 2026-02-18 15:47:50 -05:00
parent 0fe9c5b1dd
commit 00000d940c
7 changed files with 141 additions and 87 deletions

View file

@@ -71,7 +71,6 @@ async def main() -> None:
         asyncio.create_task(streamcenter.scrape(xtrnl_brwsr)),
         # asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
         asyncio.create_task(streamsgate.scrape(xtrnl_brwsr)),
-        asyncio.create_task(totalsportek.scrape(hdl_brwsr)),
         # asyncio.create_task(tvapp.scrape(hdl_brwsr)),
         asyncio.create_task(webcast.scrape(hdl_brwsr)),
     ]
@@ -83,6 +82,7 @@ async def main() -> None:
         asyncio.create_task(pawa.scrape()),
         asyncio.create_task(shark.scrape()),
         asyncio.create_task(streambtw.scrape()),
+        asyncio.create_task(totalsportek.scrape()),
         asyncio.create_task(xstreameast.scrape()),
     ]
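Together, these two hunks move totalsportek from the Playwright-backed task pool to the HTTP-only pool, matching its new browserless `scrape()` signature below. A minimal sketch of that split, with illustrative names (`run_pools`, the scraper lists, and the browser handle are placeholders, not the repo's API):

```python
import asyncio

# Hypothetical sketch: scrapers that still need a Playwright page receive a
# browser handle; HTTP-only scrapers (now including totalsportek) run without one.
async def run_pools(browser_scrapers, http_scrapers, browser):
    browser_tasks = [asyncio.create_task(s.scrape(browser)) for s in browser_scrapers]
    http_tasks = [asyncio.create_task(s.scrape()) for s in http_scrapers]
    await asyncio.gather(*browser_tasks, *http_tasks)
```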

View file

@@ -58,8 +58,6 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     if not (html_data := await network.request(BASE_URL, log=log)):
         return events
 
-    pattern = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.I)
-
     soup = HTMLParser(html_data.content)
 
     for link in soup.css("li.f1-podium--item > a.f1-podium--link"):
@@ -71,9 +69,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not (time_elem := li_item.css_first(".SaatZamanBilgisi")):
             continue
 
-        time_text = time_elem.text(strip=True)
-
-        if not pattern.search(time_text):
+        if time_elem.text(strip=True).lower() != "live":
             continue
 
         sport = rank_elem.text(strip=True)
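The removed regex also admitted events starting within the next 30 minutes; the replacement keeps only rows already labeled live. A quick self-contained comparison of the two filters:

```python
import re

# Old filter: anything marked LIVE or starting within 30 minutes.
old = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.I)
assert old.search("LIVE") and old.search("15 minutes")

# New filter: only events already live, case-insensitively.
def is_live(label: str) -> bool:
    return label.strip().lower() == "live"

assert is_live("Live") and not is_live("15 minutes")
```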

View file

@@ -81,9 +81,11 @@ async def get_events(url: str, cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape(browser: Browser) -> None:
     cached_urls = CACHE_FILE.load()
 
-    cached_count = len(cached_urls)
-    urls.update(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
+    valid_count = cached_count = len(valid_urls)
+
+    urls.update(valid_urls)
 
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -120,31 +122,35 @@ async def scrape(browser: Browser) -> None:
                 log=log,
             )
 
+            sport, event, logo, ts, link = (
+                ev["sport"],
+                ev["event"],
+                ev["logo"],
+                ev["timestamp"],
+                ev["link"],
+            )
+
+            key = f"[{sport}] {event} ({TAG})"
+            tvg_id, pic = leagues.get_tvg_info(sport, event)
+
+            entry = {
+                "url": url,
+                "logo": logo or pic,
+                "base": link,
+                "timestamp": ts,
+                "id": tvg_id or "Live.Event.us",
+                "link": link,
+            }
+
+            cached_urls[key] = entry
+
             if url:
-                sport, event, logo, ts, link = (
-                    ev["sport"],
-                    ev["event"],
-                    ev["logo"],
-                    ev["timestamp"],
-                    ev["link"],
-                )
-
-                key = f"[{sport}] {event} ({TAG})"
-                tvg_id, pic = leagues.get_tvg_info(sport, event)
-
-                entry = {
-                    "url": url,
-                    "logo": logo or pic,
-                    "base": link,
-                    "timestamp": ts,
-                    "id": tvg_id or "Live.Event.us",
-                    "link": link,
-                }
-
-                urls[key] = cached_urls[key] = entry
-
-    if new_count := len(cached_urls) - cached_count:
+                valid_count += 1
+                urls[key] = entry
+
+    if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:
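The reworked loop now caches every assembled entry but only publishes and counts entries whose stream URL was actually captured, so dead streams are remembered without being served. A toy sketch of that bookkeeping (data invented for illustration):

```python
# Every scraped entry is cached (so failed URLs aren't retried immediately),
# but only entries with a captured URL are published and counted as "valid".
cached, published = {}, {}
valid_count = cached_count = 0  # cached_count is frozen at load time

for key, entry in (("a", {"url": "http://x/1.m3u8"}), ("b", {"url": None})):
    cached[key] = entry            # always cache
    if entry["url"]:
        valid_count += 1
        published[key] = entry     # publish only working streams

new_count = valid_count - cached_count  # 1 new working event
```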

View file

@@ -104,7 +104,13 @@ async def process_event(
             "button:has-text('Stream 1')",
             timeout=5_000,
         ):
-            await btn.click()
+            await btn.click(force=True, click_count=2)
+    except TimeoutError:
+        pass
+
+    try:
+        if player := await page.wait_for_selector(".play-wrapper", timeout=5_000):
+            await player.click(force=True, click_count=3)
     except TimeoutError:
         pass
@@ -165,8 +171,8 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     live = []
 
-    start_ts = now.delta(hours=-1).timestamp()
-    end_ts = now.delta(minutes=5).timestamp()
+    start_ts = now.delta(hours=-1.5).timestamp()
+    end_ts = now.delta(minutes=1).timestamp()
 
     for k, v in events.items():
         if k in cached_keys:
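Assuming the project's `Time.delta(...)` applies a signed offset much like `timedelta`, the new window admits events that kicked off up to 90 minutes ago while cutting the forward look-ahead from five minutes to one. A standalone sketch with plain `datetime` (the helper's exact semantics are not shown in this diff):

```python
from datetime import datetime, timedelta, timezone

# Sketch of the widened scrape window: [now - 90 min, now + 1 min].
now = datetime.now(timezone.utc)
start_ts = (now + timedelta(hours=-1.5)).timestamp()
end_ts = (now + timedelta(minutes=1)).timestamp()

def in_window(event_ts: float) -> bool:
    # Keep events already underway or starting within the next minute.
    return start_ts <= event_ts <= end_ts
```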

View file

@@ -79,7 +79,8 @@ async def get_events() -> list[dict[str, str]]:
     for event in items:
         event_name: str = event["title"]
-        link: str = event["url"]
+        if not (link := event.get("url")):
+            continue
 
         events.append(
             {
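The guard pairs `dict.get` with a walrus binding so a missing or empty `url` skips the event instead of raising `KeyError`. A self-contained illustration (sample data invented):

```python
items = [
    {"title": "Team A vs Team B", "url": "https://example.org/stream/1"},
    {"title": "Team C vs Team D"},             # no "url" key: skipped
    {"title": "Team E vs Team F", "url": ""},  # empty url: also skipped
]

for event in items:
    # dict.get returns None for a missing key; the walrus binding lets one
    # expression both fetch and test the value.
    if not (link := event.get("url")):
        continue
    print(event["title"], "->", link)  # only the first event prints
```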

View file

@@ -17,7 +17,7 @@ CACHE_FILE = Cache(TAG, exp=10_800)
 HTML_CACHE = Cache(f"{TAG}-html", exp=28_800)
 
-MIRRORS = ["https://streamhub.pro", "https://livesports4u.net"]
+BASE_URL = "https://livesports4u.net"
 
 CATEGORIES = {
     "Soccer": "sport_68c02a4464a38",
@@ -35,7 +35,6 @@ CATEGORIES = {
 async def refresh_html_cache(
-    url: str,
     date: str,
     sport_id: str,
     ts: float,
@@ -45,7 +44,7 @@ async def refresh_html_cache(
     if not (
         html_data := await network.request(
-            urljoin(url, f"events/{date}"),
+            urljoin(BASE_URL, f"events/{date}"),
             log=log,
             params={"sport_id": sport_id},
         )
@@ -95,7 +94,7 @@ async def refresh_html_cache(
     return events
 
-async def get_events(url: str, cached_keys: list[str]) -> list[dict[str, str]]:
+async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     now = Time.clean(Time.now())
 
     if not (events := HTML_CACHE.load()):
@@ -103,7 +102,6 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         tasks = [
             refresh_html_cache(
-                url,
                 date,
                 sport_id,
                 now.timestamp(),
@@ -146,16 +144,9 @@ async def scrape(browser: Browser) -> None:
     log.info(f"Loaded {cached_count} event(s) from cache")
 
-    if not (base_url := await network.get_base(MIRRORS)):
-        log.warning("No working PPV mirrors")
-
-        CACHE_FILE.write(cached_urls)
-        return
-
-    log.info(f'Scraping from "{base_url}"')
-
-    events = await get_events(base_url, cached_urls.keys())
+    log.info(f'Scraping from "{BASE_URL}"')
+
+    events = await get_events(cached_urls.keys())
 
     log.info(f"Processing {len(events)} new URL(s)")

View file

@@ -1,7 +1,8 @@
+import json
+import re
 from functools import partial
 from urllib.parse import urljoin, urlparse
 
-from playwright.async_api import Browser
 from selectolax.parser import HTMLParser
 
 from .utils import Cache, Time, get_logger, leagues, network
@@ -23,6 +24,64 @@ def fix_txt(s: str) -> str:
     return s.upper() if s.islower() else s
 
 
+async def process_event(url: str, url_num: int) -> str | None:
+    if not (event_data := await network.request(url, log=log)):
+        log.info(f"URL {url_num}) Failed to load url.")
+        return
+
+    soup_1 = HTMLParser(event_data.content)
+
+    if not (iframe_1 := soup_1.css_first("iframe")):
+        log.warning(f"URL {url_num}) No iframe element found.")
+        return
+
+    if not (iframe_1_src := iframe_1.attributes.get("src")):
+        log.warning(f"URL {url_num}) No iframe source found.")
+        return
+
+    if not (iframe_1_src_data := await network.request(iframe_1_src, log=log)):
+        log.info(f"URL {url_num}) Failed to load iframe source.")
+        return
+
+    soup_2 = HTMLParser(iframe_1_src_data.content)
+
+    if not (iframe_2 := soup_2.css_first("iframe")):
+        log.warning(f"URL {url_num}) No iframe element found.")
+        return
+
+    if not (iframe_2_src := iframe_2.attributes.get("src")):
+        log.warning(f"URL {url_num}) No iframe source found.")
+        return
+
+    if not (
+        iframe_2_src_data := await network.request(
+            iframe_2_src,
+            log=log,
+            headers={"Referer": iframe_1_src},
+        )
+    ):
+        log.info(f"URL {url_num}) Failed to load iframe source.")
+        return
+
+    valid_m3u8 = re.compile(r'currentStreamUrl\s+=\s+"([^"]*)"', re.I)
+
+    if not (match := valid_m3u8.search(iframe_2_src_data.text)):
+        log.warning(f"URL {url_num}) No Clappr source found.")
+        return
+
+    log.info(f"URL {url_num}) Captured M3U8")
+
+    return json.loads(f'"{match[1]}"')
+
+
 async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     events = []
@@ -54,7 +113,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not (time_node := node.css_first(".col-3 span")):
             continue
 
-        if time_node.text(strip=True) != "MatchStarted":
+        if time_node.text(strip=True).lower() != "matchstarted":
             continue
 
         event_name = fix_txt(" vs ".join(teams))
@@ -73,7 +132,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     return events
 
-async def scrape(browser: Browser) -> None:
+async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
 
     valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
@@ -93,50 +152,45 @@ async def scrape(browser: Browser) -> None:
     if events:
         now = Time.clean(Time.now())
 
-        async with network.event_context(browser) as context:
-            for i, ev in enumerate(events, start=1):
-                async with network.event_page(context) as page:
-                    handler = partial(
-                        network.process_event,
-                        url=ev["link"],
-                        url_num=i,
-                        page=page,
-                        log=log,
-                    )
-
-                    url = await network.safe_process(
-                        handler,
-                        url_num=i,
-                        semaphore=network.PW_S,
-                        log=log,
-                        timeout=6,
-                    )
-
-                    sport, event, link = (
-                        ev["sport"],
-                        ev["event"],
-                        ev["link"],
-                    )
-
-                    key = f"[{sport}] {event} ({TAG})"
-                    tvg_id, logo = leagues.get_tvg_info(sport, event)
-
-                    entry = {
-                        "url": url,
-                        "logo": logo,
-                        "base": link,
-                        "timestamp": now.timestamp(),
-                        "id": tvg_id or "Live.Event.us",
-                        "link": link,
-                    }
-
-                    cached_urls[key] = entry
-
-                    if url:
-                        valid_count += 1
-                        urls[key] = entry
+        for i, ev in enumerate(events, start=1):
+            handler = partial(
+                process_event,
+                url=ev["link"],
+                url_num=i,
+            )
+
+            url = await network.safe_process(
+                handler,
+                url_num=i,
+                semaphore=network.HTTP_S,
+                log=log,
+            )
+
+            sport, event, link = (
+                ev["sport"],
+                ev["event"],
+                ev["link"],
+            )
+
+            key = f"[{sport}] {event} ({TAG})"
+            tvg_id, logo = leagues.get_tvg_info(sport, event)
+
+            entry = {
+                "url": url,
+                "logo": logo,
+                "base": link,
+                "timestamp": now.timestamp(),
+                "id": tvg_id or "Live.Event.us",
+                "link": link,
+            }
+
+            cached_urls[key] = entry
+
+            if url:
+                valid_count += 1
+                urls[key] = entry
 
     if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
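The new `process_event` walks two levels of iframes and then pulls the stream URL out of the embedded Clappr player script. One detail worth flagging is the closing `json.loads(f'"{match[1]}"')`: the URL sits inside JavaScript source, so it may carry backslash escapes such as `\/` or `\uXXXX`, and wrapping the capture in quotes reuses the JSON string decoder to unescape it. A standalone sketch of just that step (the sample HTML and domain are made up):

```python
import json
import re

# Player JavaScript with an escaped stream URL, as it might appear in the page.
html = 'var currentStreamUrl = "https:\\/\\/cdn.example.net\\/live\\/index.m3u8";'

if match := re.search(r'currentStreamUrl\s+=\s+"([^"]*)"', html, re.I):
    # Quote the capture and let the JSON decoder resolve \/ and \uXXXX escapes.
    print(json.loads(f'"{match[1]}"'))  # https://cdn.example.net/live/index.m3u8
```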