re-add streambtw
doms9 2025-12-22 19:59:39 -05:00
parent 64e50831ec
commit 00000d9b54
3 changed files with 158 additions and 2 deletions


@@ -12,6 +12,7 @@ from scrapers import (
     roxie,
     shark,
     sport9,
+    streambtw,
     streamcenter,
     streamfree,
     streamhub,
@@ -56,6 +57,7 @@ async def main() -> None:
         asyncio.create_task(roxie.scrape()),
         asyncio.create_task(shark.scrape()),
         asyncio.create_task(sport9.scrape()),
+        asyncio.create_task(streambtw.scrape()),
         asyncio.create_task(streamcenter.scrape()),
         asyncio.create_task(streamfree.scrape()),
         asyncio.create_task(streamhub.scrape()),
@@ -77,6 +79,7 @@ async def main() -> None:
         | roxie.urls
         | shark.urls
         | sport9.urls
+        | streambtw.urls
         | streamcenter.urls
         | strmd.urls
         | streamfree.urls


@@ -1,5 +1,6 @@
 import base64
 import re
+from functools import partial

 from selectolax.parser import HTMLParser
@@ -9,7 +10,7 @@ log = get_logger(__name__)
 urls: dict[str, dict[str, str | float]] = {}

-TAG = "ISTRMEST"
+TAG = "iSTRMEST"

 CACHE_FILE = Cache(f"{TAG.lower()}.json", exp=3_600)
@@ -121,7 +122,20 @@ async def scrape() -> None:
         now = Time.clean(Time.now()).timestamp()

         for i, ev in enumerate(events, start=1):
-            if url := await process_event(ev["link"], i):
+            handler = partial(
+                process_event,
+                url=ev["link"],
+                url_num=i,
+            )
+
+            url = await network.safe_process(
+                handler,
+                url_num=i,
+                log=log,
+                timeout=10,
+            )
+
+            if url:
                 sport, event, link = (
                     ev["sport"],
                     ev["event"],

M3U8/scrapers/streambtw.py (new file, 139 lines)

@@ -0,0 +1,139 @@
import base64
import re
from functools import partial
from urllib.parse import urljoin

from selectolax.parser import HTMLParser

from .utils import Cache, Time, get_logger, leagues, network

log = get_logger(__name__)

urls: dict[str, dict[str, str | float]] = {}

TAG = "STRMBTW"

CACHE_FILE = Cache(f"{TAG.lower()}.json", exp=3_600)

BASE_URL = "https://streambtw.com"
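

# League names arrive as hyphenated slugs (e.g. "NFL-Football");
# rewrite the hyphens as spaces for display.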
def fix_league(s: str) -> str:
    pattern = re.compile(r"^\w*-\w*", re.IGNORECASE)

    return " ".join(s.split("-")) if pattern.search(s) else s
async def process_event(url: str, url_num: int) -> str | None:
    if not (html_data := await network.request(url, log=log)):
        return

    valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)

    if not (match := valid_m3u8.search(html_data.text)):
        log.info(f"URL {url_num}) No M3U8 found")
        return

    stream_link: str = match[2]

    if not stream_link.startswith("http"):
        stream_link = base64.b64decode(stream_link).decode("utf-8")

    log.info(f"URL {url_num}) Captured M3U8")

    return stream_link
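

# Walk the homepage's ".league" cards and collect one
# {"sport", "event", "link"} entry per ".match" row.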
async def get_events() -> list[dict[str, str]]:
    events = []

    if not (html_data := await network.request(BASE_URL, log=log)):
        return events

    soup = HTMLParser(html_data.content)

    for card in soup.css(".league"):
        if not (league_elem := card.css_first(".league-header h4")):
            continue

        for event in card.css(".match"):
            if not (event_elem := event.css_first(".match-title")):
                continue

            if not (watch_btn := event.css_first(".watch-btn")) or not (
                href := watch_btn.attributes.get("href")
            ):
                continue

            league = league_elem.text(strip=True)
            name = event_elem.text(strip=True)

            events.append(
                {
                    "sport": fix_league(league),
                    "event": name,
                    "link": urljoin(BASE_URL, href),
                }
            )

    return events
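

# Serve from the hour-long cache when available; otherwise scrape every
# event page and store each captured stream under a "[sport] event (TAG)" key.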
async def scrape() -> None:
    if cached := CACHE_FILE.load():
        urls.update(cached)
        log.info(f"Loaded {len(urls)} event(s) from cache")
        return

    log.info(f'Scraping from "{BASE_URL}"')

    events = await get_events()

    log.info(f"Processing {len(events)} new URL(s)")

    if events:
        now = Time.now().timestamp()

        for i, ev in enumerate(events, start=1):
            handler = partial(
                process_event,
                url=ev["link"],
                url_num=i,
            )

            url = await network.safe_process(
                handler,
                url_num=i,
                log=log,
                timeout=10,
            )

            if url:
                sport, event, link = (
                    ev["sport"],
                    ev["event"],
                    ev["link"],
                )

                key = f"[{sport}] {event} ({TAG})"

                tvg_id, logo = leagues.get_tvg_info(sport, event)

                entry = {
                    "url": url,
                    "logo": logo,
                    "base": link,
                    "timestamp": now,
                    "id": tvg_id or "Live.Event.us",
                    "link": link,
                }

                urls[key] = entry

    log.info(f"Collected {len(urls)} event(s)")

    CACHE_FILE.write(urls)