From 00000d94e3e73b0871ac488b1c9f6ef412548c48 Mon Sep 17 00:00:00 2001 From: doms9 <96013514+doms9@users.noreply.github.com> Date: Fri, 19 Dec 2025 14:05:41 -0500 Subject: [PATCH] Bound istreameast live-minutes regex to 1-30; hoist and precompile watchfooty stream-count regex --- M3U8/scrapers/istreameast.py | 2 +- M3U8/scrapers/watchfooty.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/M3U8/scrapers/istreameast.py b/M3U8/scrapers/istreameast.py index b6da253..7d0a653 100644 --- a/M3U8/scrapers/istreameast.py +++ b/M3U8/scrapers/istreameast.py @@ -57,7 +57,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]: if not (html_data := await network.request(BASE_URL, log=log)): return events - pattern = re.compile(r"^(?:LIVE|\d+\s+(minutes?)\b)", re.IGNORECASE) + pattern = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.IGNORECASE) soup = HTMLParser(html_data.content) diff --git a/M3U8/scrapers/watchfooty.py b/M3U8/scrapers/watchfooty.py index 0df5942..18aa7e7 100644 --- a/M3U8/scrapers/watchfooty.py +++ b/M3U8/scrapers/watchfooty.py @@ -76,6 +76,8 @@ async def process_event( context: BrowserContext, ) -> str | None: + pattern = re.compile(r"\((\d+)\)") + page = await context.new_page() captured: list[str] = [] @@ -111,9 +113,7 @@ async def process_event( return - match = re.search(r"\((\d+)\)", text) - - if not match or int(match[1]) == 0: + if not (match := pattern.search(text)) or int(match[1]) == 0: log.warning(f"URL {url_num}) No available stream links.") return