replace time4tv with embedhd
doms9 2025-12-22 18:26:47 -05:00
parent 7a2caba955
commit 00000d918e
5 changed files with 146 additions and 146 deletions

View file

@@ -4,6 +4,7 @@ import re
from pathlib import Path
from scrapers import (
+    embedhd,
fawa,
istreameast,
pixel,
@@ -16,7 +17,6 @@ from scrapers import (
streamhub,
streamsgate,
strmd,
-    time4tv,
tvpass,
watchfooty,
webcast,
@@ -48,6 +48,7 @@ async def main() -> None:
base_m3u8, tvg_chno = load_base()
tasks = [
+        asyncio.create_task(embedhd.scrape()),
asyncio.create_task(fawa.scrape()),
asyncio.create_task(istreameast.scrape()),
asyncio.create_task(pixel.scrape()),
@@ -60,7 +61,6 @@ async def main() -> None:
asyncio.create_task(streamhub.scrape()),
asyncio.create_task(streamsgate.scrape()),
asyncio.create_task(strmd.scrape()),
-        asyncio.create_task(time4tv.scrape()),
asyncio.create_task(tvpass.scrape()),
asyncio.create_task(watchfooty.scrape()),
asyncio.create_task(webcast.scrape()),
@@ -69,7 +69,8 @@ async def main() -> None:
await asyncio.gather(*tasks)
additions = (
-        fawa.urls
+        embedhd.urls
+        | fawa.urls
| istreameast.urls
| pixel.urls
| ppv.urls
@@ -81,7 +82,6 @@ async def main() -> None:
| streamfree.urls
| streamhub.urls
| streamsgate.urls
-        | time4tv.urls
| tvpass.urls
| watchfooty.urls
| webcast.urls

M3U8/scrapers/embedhd.py Normal file

@@ -0,0 +1,138 @@
from functools import partial
from playwright.async_api import async_playwright
from .utils import Cache, Time, get_logger, leagues, network
log = get_logger(__name__)
urls: dict[str, dict[str, str | float]] = {}
TAG = "EMBEDHD"
CACHE_FILE = Cache(f"{TAG.lower()}.json", exp=5_400)
API_CACHE = Cache(f"{TAG.lower()}-api.json", exp=28_800)
BASE_URL = "https://embedhd.org/api-event.php"
def fix_league(s: str) -> str:
return " ".join(x.capitalize() for x in s.split()) if len(s) > 5 else s.upper()
async def get_events(cached_keys: list[str]) -> list[dict[str, str | float]]:
now = Time.clean(Time.now())
if not (api_data := API_CACHE.load(per_entry=False)):
api_data = {}
if r := await network.request(BASE_URL, log=log):
api_data: dict = r.json()
api_data["timestamp"] = now.timestamp()
API_CACHE.write(api_data)
events = []
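    # "day_et" dates are Eastern Time; keep only events scheduled for today,
    # skipping plain TV channels and events already cached under their key.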
for info in api_data.get("days", []):
event_dt = Time.from_str(info["day_et"], timezone="ET")
if now.date() != event_dt.date():
continue
for event in info["items"]:
if (event_league := event["league"]) == "channel tv":
continue
sport = fix_league(event_league)
event_name = event["title"]
if f"[{sport}] {event_name} ({TAG})" in cached_keys:
continue
event_streams: list[dict[str, str]] = event["streams"]
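            # Only the first listed stream is used; events without a usable link are skipped.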
            if not event_streams or not (event_link := event_streams[0].get("link")):
                continue
events.append(
{
"sport": sport,
"event": event_name,
"link": event_link,
"timestamp": now.timestamp(),
}
)
return events
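# Merge cached entries into the module-level urls map, then resolve stream URLs
# for any new events with a headless Playwright browser and persist the cache.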
async def scrape() -> None:
cached_urls = CACHE_FILE.load()
cached_count = len(cached_urls)
urls.update(cached_urls)
log.info(f"Loaded {cached_count} event(s) from cache")
log.info(f'Scraping from "{BASE_URL}"')
events = await get_events(cached_urls.keys())
log.info(f"Processing {len(events)} new URL(s)")
if events:
async with async_playwright() as p:
browser, context = await network.browser(p)
for i, ev in enumerate(events, start=1):
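                # Bind this event's page into the shared extraction handler;
                # safe_process runs it and returns a falsy value on failure,
                # so one bad event does not abort the whole run.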
handler = partial(
network.process_event,
url=ev["link"],
url_num=i,
context=context,
log=log,
)
url = await network.safe_process(
handler,
url_num=i,
log=log,
)
if url:
sport, event, link, ts = (
ev["sport"],
ev["event"],
ev["link"],
ev["timestamp"],
)
tvg_id, logo = leagues.get_tvg_info(sport, event)
key = f"[{sport}] {event} ({TAG})"
entry = {
"url": url,
"logo": logo,
"base": "https://vividmosaica.com/",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
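                    # Record the entry in both the live urls map and the cache
                    # so subsequent runs skip this event.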
urls[key] = cached_urls[key] = entry
await browser.close()
if new_count := len(cached_urls) - cached_count:
log.info(f"Collected and cached {new_count} new event(s)")
else:
log.info("No new events found")
CACHE_FILE.write(cached_urls)

M3U8/scrapers/time4tv.py Deleted file

@@ -1,138 +0,0 @@
import re
from functools import partial
from urllib.parse import urljoin
from playwright.async_api import async_playwright
from selectolax.parser import HTMLParser
from .utils import Cache, Time, get_logger, leagues, network
log = get_logger(__name__)
urls: dict[str, dict[str, str | float]] = {}
TAG = "TIME4TV"
CACHE_FILE = Cache(f"{TAG.lower()}.json", exp=5_400)
BASE_URL = "https://time4tv.icu/"
def fix_league(s: str) -> str:
return " ".join(x.capitalize() for x in s.split()) if len(s) > 5 else s.upper()
async def get_events(cached_keys: list[str], now: Time) -> list[dict[str, str]]:
events = []
if not (html_data := await network.request(BASE_URL, log=log)):
return events
pattern = re.compile(r"openPlayerPopup\(\s*(\d+)\s*\)", re.IGNORECASE)
soup = HTMLParser(html_data.content)
for row in soup.css(".wrap .row"):
if not (date := row.css_first(".date")):
continue
event_date = date.text(strip=True).replace("\t", " ")
try:
event_dt = Time.from_str(event_date, fmt="%m/%d/%Y %I:%M %p")
except ValueError:
continue
if event_dt.date() != now.date():
continue
league = row.css_first(".league")
title = row.css_first(".title")
hds_a = row.css_first(".hds a")
if not (league and title and hds_a):
continue
sport, event = fix_league(league.text(strip=True)), title.text(strip=True)
if f"[{sport}] {event} ({TAG})" in cached_keys:
continue
onclick = hds_a.attributes.get("onclick", "")
if not (match := pattern.search(onclick)):
continue
events.append(
{
"sport": sport,
"event": event,
"link": urljoin(BASE_URL, f"player1.php?{match[1]}"),
}
)
return events
async def scrape() -> None:
cached_urls = CACHE_FILE.load()
cached_count = len(cached_urls)
urls.update(cached_urls)
log.info(f"Loaded {cached_count} event(s) from cache")
log.info(f'Scraping from "{BASE_URL}"')
now = Time.clean(Time.now())
events = await get_events(cached_urls.keys(), now)
log.info(f"Processing {len(events)} new URL(s)")
async with async_playwright() as p:
browser, context = await network.browser(p)
for i, ev in enumerate(events, start=1):
handler = partial(
network.process_event,
url=ev["link"],
url_num=i,
context=context,
log=log,
)
url = await network.safe_process(
handler,
url_num=i,
log=log,
)
if url:
sport, event = ev["sport"], ev["event"]
tvg_id, logo = leagues.info(sport)
key = f"[{sport}] {event} ({TAG})"
entry = {
"url": url,
"logo": logo,
"base": "https://vividmosaica.com/",
"timestamp": now.timestamp(),
"id": tvg_id or "Live.Event.us",
}
urls[key] = cached_urls[key] = entry
await browser.close()
if new_count := len(cached_urls) - cached_count:
log.info(f"Collected and cached {new_count} new event(s)")
else:
log.info("No new events found")
CACHE_FILE.write(cached_urls)

View file

@@ -5,13 +5,13 @@ from .config import Time
class Cache:
-    now_ts: float = Time.now().timestamp()
def __init__(self, file: str, exp: int | float) -> None:
self.file = Path(__file__).parent.parent / "caches" / file
self.exp = exp
+        self.now_ts = Time.now().timestamp()
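        # Taken in __init__, the timestamp now reflects each Cache instance's
        # construction time; the old class attribute was evaluated once at import.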
def is_fresh(self, entry: dict) -> bool:
ts: float | int = entry.get("timestamp", Time.default_8())

View file

@@ -130,7 +130,7 @@ class Time(datetime):
class Leagues:
live_img = "https://i.gyazo.com/978f2eb4a199ca5b56b447aded0cb9e3.png"
live_img = "https://i.gyazo.com/4a5e9fa2525808ee4b65002b56d3450e.png"
def __init__(self) -> None:
self.data = json.loads(
@@ -165,7 +165,7 @@ class Leagues:
league: str,
) -> bool:
pattern = re.compile(r"\s+(?:-|vs\.?|at|@)\s+", flags=re.IGNORECASE)
pattern = re.compile(r"\s+(?:-|vs\.?|at|@)\s+", re.IGNORECASE)
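        # flags is re.compile's second positional parameter, so dropping the
        # keyword leaves behavior unchanged.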
if pattern.search(event):
t1, t2 = re.split(pattern, event)