Compare commits

...

35 commits

Author SHA1 Message Date
doms9
00000d9812 e (edit tvapp.py scraping) 2026-02-13 00:42:13 -05:00
GitHub Actions Bot
166fb66aa1 update EPG 2026-02-13 04:34:31 +00:00
GitHub Actions Bot
8f475ea3d1 health log 2026-02-13 04:32:34 +00:00
GitHub Actions Bot
b869464885 update M3U8 2026-02-12 23:31:15 -05:00
GitHub Actions Bot
e0a67e8c6c update M3U8 2026-02-12 23:01:22 -05:00
GitHub Actions Bot
fb72f7802f update M3U8 2026-02-12 22:30:38 -05:00
GitHub Actions Bot
cd9523627b update M3U8 2026-02-12 22:01:08 -05:00
GitHub Actions Bot
1c51134793 update M3U8 2026-02-12 21:31:17 -05:00
GitHub Actions Bot
8e888b133b update M3U8 2026-02-12 21:01:35 -05:00
GitHub Actions Bot
6cc060444a update M3U8 2026-02-12 20:31:25 -05:00
GitHub Actions Bot
eb160f8206 update M3U8 2026-02-12 20:01:19 -05:00
GitHub Actions Bot
a8e6e4aea7 update M3U8 2026-02-12 19:30:59 -05:00
GitHub Actions Bot
e27968a42e update M3U8 2026-02-12 19:02:53 -05:00
GitHub Actions Bot
48fe14b5de update M3U8 2026-02-12 18:32:25 -05:00
doms9
00000d964b e (misc edits) 2026-02-12 18:15:01 -05:00
GitHub Actions Bot
fdcd1d7070 update M3U8 2026-02-12 18:02:47 -05:00
GitHub Actions Bot
ba840b3112 update M3U8 2026-02-12 17:31:16 -05:00
doms9
00000d92ff e 2026-02-12 17:29:49 -05:00
GitHub Actions Bot
5b7a84a759 update M3U8 2026-02-12 17:01:08 -05:00
GitHub Actions Bot
14c9f32b28 update M3U8 2026-02-12 16:31:00 -05:00
GitHub Actions Bot
ddd4421fda update M3U8 2026-02-12 16:02:19 -05:00
GitHub Actions Bot
52487cf79a health log 2026-02-12 20:58:25 +00:00
GitHub Actions Bot
684ab89306 update M3U8 2026-02-12 15:31:20 -05:00
GitHub Actions Bot
e0a2f72f19 update M3U8 2026-02-12 15:02:30 -05:00
GitHub Actions Bot
65bccab97b update EPG 2026-02-12 19:38:44 +00:00
GitHub Actions Bot
bcfbd9a48f update M3U8 2026-02-12 14:32:04 -05:00
GitHub Actions Bot
bcc9122949 update M3U8 2026-02-12 14:02:08 -05:00
GitHub Actions Bot
0223b61fd3 update M3U8 2026-02-12 13:33:37 -05:00
GitHub Actions Bot
1f59663c95 update M3U8 2026-02-12 12:05:27 -05:00
GitHub Actions Bot
3ce37b4e30 update M3U8 2026-02-12 11:05:29 -05:00
GitHub Actions Bot
3225e8f37e health log 2026-02-12 15:13:10 +00:00
GitHub Actions Bot
ca035cf7d4 update M3U8 2026-02-12 10:02:38 -05:00
GitHub Actions Bot
2d0b3c5698 update M3U8 2026-02-12 09:04:17 -05:00
GitHub Actions Bot
c362b3a3ca update EPG 2026-02-12 11:27:06 +00:00
GitHub Actions Bot
a430bb6d82 health log 2026-02-12 09:11:01 +00:00
18 changed files with 119065 additions and 119503 deletions

File diff suppressed because it is too large

M3U8/TV.xml

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large

View file

@@ -67,11 +67,11 @@ async def main() -> None:
         asyncio.create_task(ppv.scrape(xtrnl_brwsr)),
         asyncio.create_task(sport9.scrape(xtrnl_brwsr)),
         asyncio.create_task(streamcenter.scrape(xtrnl_brwsr)),
-        # asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
+        asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
         asyncio.create_task(streamsgate.scrape(xtrnl_brwsr)),
         asyncio.create_task(totalsportek.scrape(hdl_brwsr)),
         asyncio.create_task(tvapp.scrape(hdl_brwsr)),
-        # asyncio.create_task(webcast.scrape(hdl_brwsr)),
+        asyncio.create_task(webcast.scrape(hdl_brwsr)),
     ]
     httpx_tasks = [
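This hunk re-enables the previously commented-out streamhub and webcast scrapers. The surrounding pattern, fanning the scrapers out as tasks and awaiting them together, looks roughly like the sketch below (a minimal stand-alone version; the real coroutines take shared browser handles, and the `gather` call is assumed since the hunk is cut off):

```python
import asyncio

# Hypothetical stand-in for a scraper module's scrape() coroutine;
# the real ones take a Playwright browser handle, not a name string.
async def scrape(name: str) -> str:
    await asyncio.sleep(0.1)  # simulate network/scraping work
    return f"{name}: done"

async def main() -> None:
    # create_task() starts every scraper immediately; gather() then
    # waits for all of them to finish concurrently.
    tasks = [asyncio.create_task(scrape(n)) for n in ("streamhub", "webcast")]
    print(await asyncio.gather(*tasks))

asyncio.run(main())
```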

View file

@@ -25,7 +25,7 @@ async def process_event(url: str, url_num: int) -> str | None:
     valid_m3u8 = re.compile(
         r'var\s+(\w+)\s*=\s*\[["\']?(https?:\/\/[^"\'\s>]+\.m3u8(?:\?[^"\'\s>]*)?)["\']\]?',
-        re.IGNORECASE,
+        re.I,
     )
     if not (match := valid_m3u8.search(html_data.text)):
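The only change here is the flag spelling: `re.I` is the short alias Python's `re` module defines for `re.IGNORECASE`, so the compiled pattern is identical. A quick check:

```python
import re

# re.I and re.IGNORECASE are the same RegexFlag member.
assert re.I is re.IGNORECASE
assert re.compile(r"\.m3u8", re.I).search("PLAYLIST.M3U8")
```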
@@ -89,9 +89,11 @@ async def scrape() -> None:
     cached_hrefs = {entry["href"] for entry in cached_urls.values()}
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+    urls.update(valid_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -118,30 +120,34 @@ async def scrape() -> None:
                 log=log,
             )
-            sport, event, link = (
-                ev["sport"],
-                ev["event"],
-                ev["link"],
-            )
-            key = f"[{sport}] {event} ({TAG})"
-            tvg_id, logo = leagues.get_tvg_info(sport, event)
-            entry = {
-                "url": url,
-                "logo": logo,
-                "base": BASE_URL,
-                "timestamp": now.timestamp(),
-                "id": tvg_id or "Live.Event.us",
-                "href": ev["href"],
-                "link": link,
-            }
-            cached_urls[key] = entry
             if url:
+                sport, event, link = (
+                    ev["sport"],
+                    ev["event"],
+                    ev["link"],
+                )
+                valid_count += 1
+                key = f"[{sport}] {event} ({TAG})"
-                urls[key] = entry
+                tvg_id, logo = leagues.get_tvg_info(sport, event)
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": BASE_URL,
+                    "timestamp": now.timestamp(),
+                    "id": tvg_id or "Live.Event.us",
+                    "href": ev["href"],
+                    "link": link,
+                }
+                urls[key] = cached_urls[key] = entry
-    if new_count := len(cached_urls) - cached_count:
+    if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:
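The rewritten loop only builds and stores an entry once a URL was actually resolved, and the chained assignment `urls[key] = cached_urls[key] = entry` binds one shared dict under both mappings, so the live map and the cache stay in sync by reference:

```python
entry = {"url": "https://example.com/event.m3u8"}
urls: dict = {}
cached_urls: dict = {}
urls["key"] = cached_urls["key"] = entry   # two keys, one shared object
entry["url"] = "https://example.com/moved.m3u8"
assert urls["key"] is cached_urls["key"]   # same dict in both mappings
assert urls["key"]["url"].endswith("moved.m3u8")
```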

View file

@@ -40,7 +40,7 @@ async def process_event(url: str, url_num: int) -> str | None:
         return
-    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.IGNORECASE)
+    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.I)
     if not (match := pattern.search(iframe_src_data.text)):
         log.warning(f"URL {url_num}) No Clappr source found.")
@@ -58,7 +58,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     if not (html_data := await network.request(BASE_URL, log=log)):
         return events
-    pattern = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.IGNORECASE)
+    pattern = re.compile(r"^(?:LIVE|(?:[1-9]|[12]\d|30)\s+minutes?\b)", re.I)
     soup = HTMLParser(html_data.content)
@@ -106,9 +106,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+    urls.update(valid_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -135,29 +137,33 @@ async def scrape() -> None:
                 log=log,
             )
-            sport, event, link = (
-                ev["sport"],
-                ev["event"],
-                ev["link"],
-            )
-            key = f"[{sport}] {event} ({TAG})"
-            tvg_id, logo = leagues.get_tvg_info(sport, event)
-            entry = {
-                "url": url,
-                "logo": logo,
-                "base": "https://gooz.aapmains.net",
-                "timestamp": now.timestamp(),
-                "id": tvg_id or "Live.Event.us",
-                "link": link,
-            }
-            cached_urls[key] = entry
             if url:
+                sport, event, link = (
+                    ev["sport"],
+                    ev["event"],
+                    ev["link"],
+                )
+                valid_count += 1
+                key = f"[{sport}] {event} ({TAG})"
-                urls[key] = entry
+                tvg_id, logo = leagues.get_tvg_info(sport, event)
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": "https://gooz.aapmains.net",
+                    "timestamp": now.timestamp(),
+                    "id": tvg_id or "Live.Event.us",
+                    "link": link,
+                }
+                urls[key] = cached_urls[key] = entry
-    if new_count := len(cached_urls) - cached_count:
+    if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:

View file

@@ -184,7 +184,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append({**v})
+        live.append(v)
     return live
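`live.append({**v})` appends a shallow copy of each event dict, while the new `live.append(v)` appends a reference to the cached dict itself. That saves a copy per event, with the trade-off that later mutation of the cached entry is visible through the returned list:

```python
v = {"event": "Final", "event_ts": 1_760_000_000}
snapshot, alias = {**v}, v
v["event"] = "Semifinal"
assert snapshot["event"] == "Final"     # the copy kept the old value
assert alias["event"] == "Semifinal"    # the reference sees the change
```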

View file

@@ -41,7 +41,7 @@ async def process_event(url: str, url_num: int) -> str | None:
         return
-    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.IGNORECASE)
+    pattern = re.compile(r"source:\s*window\.atob\(\s*'([^']+)'\s*\)", re.I)
     if not (match := pattern.search(iframe_src_data.text)):
         log.warning(f"URL {url_num}) No Clappr source found.")
@@ -89,9 +89,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+    urls.update(valid_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -118,29 +120,33 @@ async def scrape() -> None:
                 log=log,
             )
-            sport, event, link = (
-                ev["sport"],
-                ev["event"],
-                ev["link"],
-            )
-            key = f"[{sport}] {event} ({TAG})"
-            tvg_id, logo = leagues.get_tvg_info(sport, event)
-            entry = {
-                "url": url,
-                "logo": logo,
-                "base": link,
-                "timestamp": now.timestamp(),
-                "id": tvg_id or "Live.Event.us",
-                "link": link,
-            }
-            cached_urls[key] = entry
             if url:
+                sport, event, link = (
+                    ev["sport"],
+                    ev["event"],
+                    ev["link"],
+                )
+                valid_count += 1
+                key = f"[{sport}] {event} ({TAG})"
-                urls[key] = entry
+                tvg_id, logo = leagues.get_tvg_info(sport, event)
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": link,
+                    "timestamp": now.timestamp(),
+                    "id": tvg_id or "Live.Event.us",
+                    "link": link,
+                }
+                urls[key] = cached_urls[key] = entry
-    if new_count := len(cached_urls) - cached_count:
+    if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:

View file

@@ -34,7 +34,7 @@ async def process_event(url: str, url_num: int) -> str | None:
     if not (html_data := await network.request(url, log=log)):
         return
-    valid_m3u8 = re.compile(r"'clappr',\s+'([^\"]*)'", re.IGNORECASE)
+    valid_m3u8 = re.compile(r"'clappr',\s+'([^\"]*)'", re.I)
     if not (match := valid_m3u8.search(html_data.text)):
         log.info(f"URL {url_num}) No M3U8 found")
@@ -125,7 +125,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append({**v})
+        live.append(v)
     return live
@@ -133,9 +133,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape() -> None:
     cached_urls = CACHE_FILE.load()
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+    urls.update(valid_urls)
     log.info(f"Loaded {cached_count} event(s) from cache")
@@ -160,30 +162,34 @@ async def scrape() -> None:
                 log=log,
             )
-            sport, event, ts, link = (
-                ev["sport"],
-                ev["event"],
-                ev["event_ts"],
-                ev["link"],
-            )
-            tvg_id, logo = leagues.get_tvg_info(sport, event)
-            key = f"[{sport}] {event} ({TAG})"
-            entry = {
-                "url": url,
-                "logo": logo,
-                "base": BASE_URL,
-                "timestamp": ts,
-                "id": tvg_id or "Live.Event.us",
-                "link": link,
-            }
-            cached_urls[key] = entry
             if url:
+                sport, event, ts, link = (
+                    ev["sport"],
+                    ev["event"],
+                    ev["event_ts"],
+                    ev["link"],
+                )
+                valid_count += 1
+                tvg_id, logo = leagues.get_tvg_info(sport, event)
-                urls[key] = entry
+                key = f"[{sport}] {event} ({TAG})"
+                entry = {
+                    "url": url,
+                    "logo": logo,
+                    "base": BASE_URL,
+                    "timestamp": ts,
+                    "id": tvg_id or "Live.Event.us",
+                    "link": link,
+                }
+                urls[key] = cached_urls[key] = entry
-    if new_count := len(cached_urls) - cached_count:
+    if new_count := valid_count - cached_count:
         log.info(f"Collected and cached {new_count} new event(s)")
     else:

View file

@@ -44,7 +44,7 @@ async def refresh_html_cache(now_ts: float) -> dict[str, dict[str, str | float]]:
     if not (html_data := await network.request(BASE_URL, log=log)):
         return events
-    pattern = re.compile(r"openEmbed\('([^']+)'\)", re.IGNORECASE)
+    pattern = re.compile(r"openEmbed\('([^']+)'\)", re.I)
     soup = HTMLParser(html_data.content)
@@ -106,7 +106,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append({**v})
+        live.append(v)
     return live

View file

@@ -45,7 +45,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
     for card in soup.css("a.match-card"):
         live_badge = card.css_first(".live-badge")
-        if not live_badge or live_badge.text(strip=True) != "Live":
+        if not live_badge or live_badge.text(strip=True).lower() != "live":
             continue
         if not (sport_node := card.css_first(".tournament-name")):
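Lower-casing the badge text makes the guard accept any capitalisation the site emits ("LIVE", "Live", and so on). A small self-contained check using selectolax, whose API the `HTMLParser`/`css_first` calls here match (the markup is invented):

```python
from selectolax.parser import HTMLParser

html = '<a class="match-card"><span class="live-badge">LIVE</span></a>'
card = HTMLParser(html).css_first("a.match-card")
badge = card.css_first(".live-badge")
# The updated guard: a missing badge or any non-"live" text gets skipped.
assert badge is not None and badge.text(strip=True).lower() == "live"
```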

View file

@@ -19,7 +19,7 @@ BASE_URL = "https://hiteasport.info"
 def fix_league(s: str) -> str:
-    pattern = re.compile(r"^\w*-\w*", re.IGNORECASE)
+    pattern = re.compile(r"^\w*-\w*", re.I)
     return " ".join(s.split("-")) if pattern.search(s) else s
@@ -28,7 +28,7 @@ async def process_event(url: str, url_num: int) -> str | None:
     if not (html_data := await network.request(url, log=log)):
         return
-    valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)
+    valid_m3u8 = re.compile(r'var\s+(\w+)\s*=\s*"([^"]*)"', re.I)
     if not (match := valid_m3u8.search(html_data.text)):
         log.info(f"URL {url_num}) No M3U8 found")

View file

@@ -130,7 +130,7 @@ async def get_events(url: str, cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append({**v})
+        live.append(v)
     return live

View file

@@ -25,7 +25,7 @@ def fix_url(s: str) -> str:
     return urljoin(f"http://{base}", parsed.path.replace("tracks-v1a1/", ""))
-async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
+async def get_events() -> list[dict[str, str]]:
     events = []
     if not (html_data := await network.request(BASE_URL, log=log)):
@@ -45,9 +45,6 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         for a in row.css("a.list-group-item[href]"):
             event_name = a.text(strip=True).split(":", 1)[0]
-            if f"[{sport}] {event_name} ({TAG})" in cached_keys:
-                continue
             if not (href := a.attributes.get("href")):
                 continue
@@ -63,17 +60,16 @@
 async def scrape(browser: Browser) -> None:
-    cached_urls = CACHE_FILE.load()
+    if cached := CACHE_FILE.load():
+        urls.update(cached)
-    cached_count = len(cached_urls)
+        log.info(f"Loaded {len(urls)} event(s) from cache")
-    urls.update(cached_urls)
-    log.info(f"Loaded {cached_count} event(s) from cache")
+        return
     log.info(f'Scraping from "{BASE_URL}"')
-    events = await get_events(cached_urls.keys())
+    events = await get_events()
     log.info(f"Processing {len(events)} new URL(s)")
@@ -118,12 +114,8 @@ async def scrape(browser: Browser) -> None:
             "link": link,
         }
-        urls[key] = cached_urls[key] = entry
+        urls[key] = entry
-    if new_count := len(cached_urls) - cached_count:
-        log.info(f"Collected and cached {new_count} new event(s)")
+        log.info(f"Collected and cached {len(urls)} new event(s)")
     else:
         log.info("No new events found")
-    CACHE_FILE.write(cached_urls)
+    CACHE_FILE.write(urls)
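Taken together, these hunks invert the file's cache strategy: `get_events()` no longer receives `cached_keys`, `scrape()` returns early whenever a cache exists, and `CACHE_FILE.write(urls)` persists only what the current run collected. A rough sketch of the new control flow, with plain dicts standing in for the module's globals:

```python
# Stand-ins for the module's CACHE_FILE contents and urls mapping.
def scrape_flow(cache: dict[str, dict], scraped: dict[str, dict]) -> dict[str, dict]:
    urls: dict[str, dict] = {}
    if cache:              # cache hit: reuse it wholesale and skip scraping
        urls.update(cache)
        return urls
    urls.update(scraped)   # cache miss: everything collected counts as new
    return urls            # the real code then calls CACHE_FILE.write(urls)
```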

View file

@@ -28,8 +28,6 @@ class Network:
     PW_S = asyncio.Semaphore(3)
-    proxy_base = "https://stream.nvrmind.xyz"
     def __init__(self) -> None:
         self.client = httpx.AsyncClient(
             timeout=httpx.Timeout(5.0),
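`PW_S = asyncio.Semaphore(3)` caps how many Playwright-backed fetches run at once. Presumably each browser request acquires it; the usual shape of that pattern is sketched below (the fetch body is invented):

```python
import asyncio

PW_S = asyncio.Semaphore(3)  # at most three browser fetches in flight

async def fetch(url: str) -> str:
    async with PW_S:              # a fourth caller waits until a slot frees up
        await asyncio.sleep(0.1)  # stand-in for the real Playwright round trip
        return url

async def main() -> None:
    print(await asyncio.gather(*(fetch(f"https://example.com/{i}") for i in range(9))))

asyncio.run(main())
```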

View file

@@ -108,7 +108,7 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
         if not start_ts <= v["event_ts"] <= end_ts:
             continue
-        live.append({**v})
+        live.append(v)
     return live

View file

@@ -30,7 +30,7 @@ SPORT_ENDPOINTS = [
 async def process_event(url: str, url_num: int) -> tuple[str | None, str | None]:
-    valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.IGNORECASE)
+    valid_m3u8 = re.compile(r'(var|const)\s+(\w+)\s*=\s*"([^"]*)"', re.I)
     nones = None, None

View file

@@ -1,11 +1,10 @@
-## Base Log @ 2026-02-12 04:35 UTC
+## Base Log @ 2026-02-13 04:32 UTC
-### ✅ Working Streams: 144<br>❌ Dead Streams: 2
+### ✅ Working Streams: 145<br>❌ Dead Streams: 1
 | Channel | Error (Code) | Link |
 | ------- | ------------ | ---- |
 | Hallmark Mystery | HTTP Error (403) | `http://mytvstream.net:8080/live/30550113/30550113/10289.m3u8` |
-| NFL RedZone | HTTP Error (000) | `http://mytvstream.net:8080/live/30550113/30550113/159118.m3u8` |
 ---
 #### Base Channels URL
 ```