Compare commits

...

36 commits

Author SHA1 Message Date
GitHub Actions Bot
cdb7d9f94d health log 2026-03-01 08:53:25 +00:00
GitHub Actions Bot
bbc1492601 update EPG 2026-03-01 04:35:28 +00:00
GitHub Actions Bot
dbc374046e health log 2026-03-01 04:33:28 +00:00
GitHub Actions Bot
3ef6c55e3c update M3U8 2026-02-28 23:31:09 -05:00
GitHub Actions Bot
f3ecdd79ce update M3U8 2026-02-28 23:02:38 -05:00
GitHub Actions Bot
949105eb34 update M3U8 2026-02-28 22:32:21 -05:00
GitHub Actions Bot
5144333759 update M3U8 2026-02-28 22:02:25 -05:00
GitHub Actions Bot
1c7b8401ff update M3U8 2026-02-28 21:31:26 -05:00
GitHub Actions Bot
b5901f780b update M3U8 2026-02-28 21:03:00 -05:00
GitHub Actions Bot
eb7c26218a update M3U8 2026-02-28 20:35:20 -05:00
GitHub Actions Bot
72a3caa3aa update M3U8 2026-02-28 20:03:39 -05:00
GitHub Actions Bot
4fd81c968b update M3U8 2026-02-28 19:32:49 -05:00
GitHub Actions Bot
ed6ffa3950 update M3U8 2026-02-28 19:08:09 -05:00
GitHub Actions Bot
4cf2c8ea82 update M3U8 2026-02-28 18:33:07 -05:00
GitHub Actions Bot
2f797899a6 update M3U8 2026-02-28 18:04:39 -05:00
GitHub Actions Bot
fc3ce03f09 update M3U8 2026-02-28 17:31:59 -05:00
GitHub Actions Bot
2b15c91dcf update M3U8 2026-02-28 17:05:00 -05:00
GitHub Actions Bot
b91dd3ab8f update M3U8 2026-02-28 16:33:40 -05:00
GitHub Actions Bot
344606bb4c update M3U8 2026-02-28 16:17:07 -05:00
GitHub Actions Bot
31ffaf38e3 health log 2026-02-28 20:46:20 +00:00
doms9
00000d9595 e
edit urls for websites
2026-02-28 15:42:50 -05:00
GitHub Actions Bot
b6f835b575 update M3U8 2026-02-28 15:33:23 -05:00
GitHub Actions Bot
5e06054ae9 update M3U8 2026-02-28 15:07:34 -05:00
GitHub Actions Bot
8287acb1fd update M3U8 2026-02-28 14:33:28 -05:00
GitHub Actions Bot
b234830e1e update M3U8 2026-02-28 14:12:13 -05:00
GitHub Actions Bot
f957585c4f update EPG 2026-02-28 19:00:25 +00:00
GitHub Actions Bot
c59870e8ba update M3U8 2026-02-28 13:35:43 -05:00
GitHub Actions Bot
43e3257166 update M3U8 2026-02-28 13:11:22 -05:00
GitHub Actions Bot
ef41e6aac7 update M3U8 2026-02-28 12:12:44 -05:00
doms9
00000d922c e
add timstreams.py
2026-02-28 11:58:39 -05:00
GitHub Actions Bot
045e172f94 update M3U8 2026-02-28 11:08:39 -05:00
GitHub Actions Bot
9dcfa16dfe update M3U8 2026-02-28 10:27:10 -05:00
GitHub Actions Bot
e4668f875a health log 2026-02-28 14:45:42 +00:00
GitHub Actions Bot
8235812864 update M3U8 2026-02-28 09:13:57 -05:00
GitHub Actions Bot
5f43936dae update M3U8 2026-02-28 08:05:52 -05:00
GitHub Actions Bot
540ef55eb4 update EPG 2026-02-28 10:55:28 +00:00
12 changed files with 116541 additions and 114874 deletions

File diff suppressed because it is too large Load diff

225567
M3U8/TV.xml

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large Load diff

View file

@ -17,9 +17,11 @@ from scrapers import (
roxie,
shark,
sport9,
streambtw,
streamcenter,
streamhub,
streamsgate,
timstreams,
totalsportek,
tvapp,
volokit,
@ -70,7 +72,8 @@ async def main() -> None:
asyncio.create_task(sport9.scrape(xtrnl_brwsr)),
asyncio.create_task(streamcenter.scrape(hdl_brwsr)),
# asyncio.create_task(streamhub.scrape(xtrnl_brwsr)),
asyncio.create_task(streamsgate.scrape(hdl_brwsr)),
asyncio.create_task(streamsgate.scrape(xtrnl_brwsr)),
asyncio.create_task(timstreams.scrape(xtrnl_brwsr)),
]
httpx_tasks = [
@ -79,6 +82,7 @@ async def main() -> None:
asyncio.create_task(ovogoal.scrape()),
asyncio.create_task(pawa.scrape()),
asyncio.create_task(shark.scrape()),
asyncio.create_task(streambtw.scrape()),
asyncio.create_task(totalsportek.scrape()),
asyncio.create_task(tvapp.scrape()),
asyncio.create_task(volokit.scrape()),
@ -112,9 +116,11 @@ async def main() -> None:
| roxie.urls
| shark.urls
| sport9.urls
| streambtw.urls
| streamcenter.urls
| streamhub.urls
| streamsgate.urls
| timstreams.urls
| totalsportek.urls
| tvapp.urls
| volokit.urls

View file

@ -1,3 +1,4 @@
import re
from functools import partial
from playwright.async_api import Browser
@ -14,7 +15,7 @@ CACHE_FILE = Cache(TAG, exp=10_800)
API_FILE = Cache(f"{TAG}-api", exp=19_800)
MIRRORS = [
API_MIRRORS = [
"https://api.ppv.to/api/streams",
"https://api.ppv.cx/api/streams",
"https://api.ppv.sh/api/streams",
@ -22,6 +23,12 @@ MIRRORS = [
]
def fix_url(s: str) -> str:
    """Rewrite a trailing ``index.m3u8`` into the mono-track playlist path.

    Strings that do not end with ``index.m3u8`` (case-insensitive) are
    returned unchanged.
    """
    return re.sub(r"index\.m3u8$", "tracks-v1a1/mono.ts.m3u8", s, flags=re.I)
async def get_events(url: str, cached_keys: list[str]) -> list[dict[str, str]]:
now = Time.clean(Time.now())
@ -90,16 +97,16 @@ async def scrape(browser: Browser) -> None:
log.info(f"Loaded {cached_count} event(s) from cache")
if not (base_url := await network.get_base(MIRRORS)):
if not (api_url := await network.get_base(API_MIRRORS)):
log.warning("No working PPV mirrors")
CACHE_FILE.write(cached_urls)
return
log.info(f'Scraping from "{base_url}"')
log.info(f'Scraping from "{api_url}"')
events = await get_events(base_url, cached_urls.keys())
events = await get_events(api_url, cached_urls.keys())
if events:
log.info(f"Processing {len(events)} new URL(s)")
@ -148,6 +155,8 @@ async def scrape(browser: Browser) -> None:
if url:
valid_count += 1
entry["url"] = fix_url(url)
urls[key] = entry
if new_count := valid_count - cached_count:

View file

@ -31,9 +31,11 @@ async def process_event(url: str, url_num: int) -> str | None:
return
pattern = re.compile(r"playlist\.m3u8\?.*$", re.I)
log.info(f"URL {url_num}) Captured M3U8")
return urls[0]
return pattern.sub(r"chunks.m3u8", urls[0])
async def refresh_html_cache(now_ts: float) -> dict[str, dict[str, str | float]]:

147
M3U8/scrapers/streambtw.py Normal file
View file

@ -0,0 +1,147 @@
import base64
import re
from functools import partial
from urllib.parse import urljoin
from .utils import Cache, Time, get_logger, leagues, network
log = get_logger(__name__)
urls: dict[str, dict[str, str | float]] = {}
TAG = "STRMBTW"
CACHE_FILE = Cache(TAG, exp=3_600)
API_FILE = Cache(f"{TAG}-api", exp=19_800)
BASE_URL = "https://streambtw.com"
def fix_league(s: str) -> str:
    """Turn hyphenated league slugs (e.g. ``"nba-basketball"``) into spaced words.

    Strings that do not start with a word-hyphen-word shape are returned
    unchanged.
    """
    slug_shape = re.compile(r"^\w*-\w*", re.I)
    if slug_shape.search(s):
        return s.replace("-", " ")
    return s
async def process_event(url: str, url_num: int) -> str | None:
    """Fetch an event page and extract its stream link.

    Returns the M3U8 URL (base64-decoded when the page embeds it encoded),
    or None when the page could not be fetched or no candidate was found.
    """
    response = await network.request(url, log=log)
    if not response:
        return
    # The page embeds the stream as `var <name> = "<value>"` in inline JS.
    assignment = re.search(r'var\s+(\w+)\s*=\s*"([^"]*)"', response.text, re.I)
    if assignment is None:
        log.info(f"URL {url_num}) No M3U8 found")
        return
    link: str = assignment[2]
    # Values that are not plain http(s) URLs are base64-encoded.
    if not link.startswith("http"):
        link = base64.b64decode(link).decode("utf-8")
    log.info(f"URL {url_num}) Captured M3U8")
    return link
async def get_events() -> list[dict[str, str]]:
    """Return today's events from the streambtw public API.

    Uses the cached API payload when fresh; otherwise refetches it and
    rewrites the cache. Returns an empty list when the feed's ``updated_at``
    date is not today.
    """
    now = Time.clean(Time.now())

    api_data = API_FILE.load(per_entry=False)
    if not api_data:
        log.info("Refreshing API cache")
        # Fallback payload so a failed fetch still stamps the cache.
        api_data = {"timestamp": now.timestamp()}
        r = await network.request(
            urljoin(BASE_URL, "public/api.php"),
            log=log,
            params={"action": "get"},
        )
        if r:
            api_data = r.json()
            api_data["timestamp"] = now.timestamp()
        API_FILE.write(api_data)

    events: list[dict[str, str]] = []

    # A feed last updated on a different day is considered stale.
    if last_update := api_data.get("updated_at"):
        if Time.from_str(last_update, timezone="UTC").date() != now.date():
            return events

    for group in api_data.get("groups", []):
        sport = group["title"] or "Live Event"
        for event in group.get("items") or []:
            name: str = event["title"]
            link = event.get("url")
            if not link:
                continue
            events.append(
                {
                    "sport": fix_league(sport),
                    "event": name,
                    "link": link,
                }
            )
    return events
async def scrape() -> None:
    """Scrape streambtw events into the module-level ``urls`` mapping.

    Short-circuits entirely on a warm cache; otherwise resolves each event's
    stream URL and persists the collected entries back to the cache file.
    """
    cached = CACHE_FILE.load()
    if cached:
        urls.update(cached)
        log.info(f"Loaded {len(urls)} event(s) from cache")
        return

    log.info(f'Scraping from "{BASE_URL}"')
    events = await get_events()

    if events:
        log.info(f"Processing {len(events)} new URL(s)")
        now = Time.clean(Time.now())
        for idx, ev in enumerate(events, start=1):
            link = ev["link"]
            resolver = partial(process_event, url=link, url_num=idx)
            stream_url = await network.safe_process(
                resolver,
                url_num=idx,
                semaphore=network.HTTP_S,
                log=log,
            )
            if not stream_url:
                continue
            sport, event = ev["sport"], ev["event"]
            tvg_id, logo = leagues.get_tvg_info(sport, event)
            urls[f"[{sport}] {event} ({TAG})"] = {
                "url": stream_url,
                "logo": logo,
                "base": link,
                "timestamp": now.timestamp(),
                "id": tvg_id or "Live.Event.us",
                "link": link,
            }

    log.info(f"Collected {len(urls)} event(s)")
    CACHE_FILE.write(urls)

View file

@ -92,9 +92,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
async def scrape(browser: Browser) -> None:
cached_urls = CACHE_FILE.load()
cached_count = len(cached_urls)
valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
urls.update(cached_urls)
valid_count = cached_count = len(valid_urls)
urls.update(valid_urls)
log.info(f"Loaded {cached_count} event(s) from cache")
@ -105,7 +107,7 @@ async def scrape(browser: Browser) -> None:
if events:
log.info(f"Processing {len(events)} new URL(s)")
async with network.event_context(browser, stealth=False) as context:
async with network.event_context(browser) as context:
for i, ev in enumerate(events, start=1):
async with network.event_page(context) as page:
handler = partial(
@ -123,29 +125,35 @@ async def scrape(browser: Browser) -> None:
log=log,
)
sport, event, ts = (
ev["sport"],
ev["event"],
ev["timestamp"],
)
key = f"[{sport}] {event} ({TAG})"
tvg_id, logo = leagues.get_tvg_info(sport, event)
entry = {
"url": url,
"logo": logo,
"base": "https://streamcenter.xyz",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
cached_urls[key] = entry
if url:
sport, event, ts = (
ev["sport"],
ev["event"],
ev["timestamp"],
)
valid_count += 1
key = f"[{sport}] {event} ({TAG})"
entry["url"] = url.split("?")[0]
tvg_id, logo = leagues.get_tvg_info(sport, event)
urls[key] = entry
entry = {
"url": url,
"logo": logo,
"base": "https://streamcenter.xyz",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
urls[key] = cached_urls[key] = entry
if new_count := len(cached_urls) - cached_count:
if new_count := valid_count - cached_count:
log.info(f"Collected and cached {new_count} new event(s)")
else:

View file

@ -123,9 +123,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
async def scrape(browser: Browser) -> None:
cached_urls = CACHE_FILE.load()
cached_count = len(cached_urls)
valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
urls.update(cached_urls)
valid_count = cached_count = len(valid_urls)
urls.update(valid_urls)
log.info(f"Loaded {cached_count} event(s) from cache")
@ -154,29 +156,35 @@ async def scrape(browser: Browser) -> None:
log=log,
)
sport, event, ts = (
ev["sport"],
ev["event"],
ev["timestamp"],
)
key = f"[{sport}] {event} ({TAG})"
tvg_id, logo = leagues.get_tvg_info(sport, event)
entry = {
"url": url,
"logo": logo,
"base": "https://instreams.click/",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
cached_urls[key] = entry
if url:
sport, event, ts = (
ev["sport"],
ev["event"],
ev["timestamp"],
)
valid_count += 1
key = f"[{sport}] {event} ({TAG})"
entry["url"] = url.split("&e")[0]
tvg_id, logo = leagues.get_tvg_info(sport, event)
urls[key] = entry
entry = {
"url": url,
"logo": logo,
"base": "https://instreams.click/",
"timestamp": ts,
"id": tvg_id or "Live.Event.us",
"link": link,
}
urls[key] = cached_urls[key] = entry
if new_count := len(cached_urls) - cached_count:
if new_count := valid_count - cached_count:
log.info(f"Collected and cached {new_count} new event(s)")
else:

177
M3U8/scrapers/timstreams.py Normal file
View file

@ -0,0 +1,177 @@
from functools import partial
from typing import Any
from urllib.parse import urljoin
from playwright.async_api import Browser
from .utils import Cache, Time, get_logger, leagues, network
log = get_logger(__name__)
urls: dict[str, dict[str, str | float]] = {}
TAG = "TIMSTRMS"
CACHE_FILE = Cache(TAG, exp=10_800)
API_FILE = Cache(f"{TAG}-api", exp=19_800)
API_URL = "https://stra.viaplus.site/main"
BASE_URL = "https://timstreams.fit"
SPORT_GENRES = {
1: "Soccer",
2: "Motorsport",
3: "MMA",
4: "Fight",
5: "Boxing",
6: "Wrestling",
7: "Basketball",
# 8: "American Football",
9: "Baseball",
10: "Tennis",
11: "Hockey",
# 12: "Darts",
# 13: "Cricket",
# 14: "Cycling",
# 15: "Rugby",
# 16: "Live Shows",
# 17: "Other",
}
async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
    """Return timstreams events starting within ±30 minutes of now.

    Events whose genre is not in SPORT_GENRES, whose key is already in
    ``cached_keys``, or which expose no stream URL are skipped. The API
    payload is served from cache when fresh, refetched otherwise.
    """
    now = Time.clean(Time.now())

    api_data = API_FILE.load(per_entry=False, index=-1)
    if not api_data:
        log.info("Refreshing API cache")
        # Fallback payload so a failed fetch still stamps the cache.
        api_data = [{"timestamp": now.timestamp()}]
        r = await network.request(API_URL, log=log)
        if r:
            api_data = r.json()
            api_data[-1]["timestamp"] = now.timestamp()
        API_FILE.write(api_data)

    events: list[dict[str, str]] = []
    window_start = now.delta(minutes=-30)
    window_end = now.delta(minutes=30)

    for section in api_data:
        if section.get("category") != "Events":
            continue
        section_events: list[dict[str, Any]] = section["events"]
        for ev in section_events:
            genre = ev["genre"]
            if genre not in SPORT_GENRES:
                continue
            # NOTE(review): feed times appear to be US Eastern — confirm.
            event_dt = Time.from_str(ev["time"], timezone="EST")
            if not window_start <= event_dt <= window_end:
                continue
            name: str = ev["name"]
            url_id: str = ev["URL"]
            logo: str | None = ev.get("logo")
            sport = SPORT_GENRES[genre]
            if f"[{sport}] {name} ({TAG})" in cached_keys:
                continue
            streams = ev["streams"]
            url = streams[0].get("url") if streams else None
            if not url:
                continue
            events.append(
                {
                    "sport": sport,
                    "event": name,
                    "link": urljoin(BASE_URL, f"watch?id={url_id}"),
                    "ref": url,
                    "logo": logo,
                    "timestamp": event_dt.timestamp(),
                }
            )
    return events
async def scrape(browser: Browser) -> None:
    """Resolve timstreams events via Playwright and cache the results.

    Every attempted event is written to the cache file (so get_events skips
    it on the next run), but only entries with a resolved stream URL are
    published into the module-level ``urls`` mapping.
    """
    cached_urls = CACHE_FILE.load()
    valid_urls = {key: entry for key, entry in cached_urls.items() if entry["url"]}
    valid_count = cached_count = len(valid_urls)
    urls.update(valid_urls)
    log.info(f"Loaded {cached_count} event(s) from cache")

    log.info(f'Scraping from "{BASE_URL}"')
    events = await get_events(cached_urls.keys())

    if events:
        log.info(f"Processing {len(events)} new URL(s)")
        async with network.event_context(browser, stealth=False) as context:
            for idx, ev in enumerate(events, start=1):
                async with network.event_page(context) as page:
                    link = ev["link"]
                    handler = partial(
                        network.process_event,
                        url=link,
                        url_num=idx,
                        page=page,
                        log=log,
                    )
                    stream_url = await network.safe_process(
                        handler,
                        url_num=idx,
                        semaphore=network.PW_S,
                        log=log,
                    )
                    sport, event = ev["sport"], ev["event"]
                    key = f"[{sport}] {event} ({TAG})"
                    tvg_id, pic = leagues.get_tvg_info(sport, event)
                    entry = {
                        "url": stream_url,
                        "logo": ev["logo"] or pic,
                        "base": ev["ref"],
                        "timestamp": ev["timestamp"],
                        "id": tvg_id or "Live.Event.us",
                        "link": link,
                    }
                    # Cache every attempt; publish only the successful ones.
                    cached_urls[key] = entry
                    if stream_url:
                        valid_count += 1
                        urls[key] = entry

    if new_count := valid_count - cached_count:
        log.info(f"Collected and cached {new_count} new event(s)")
    else:
        log.info("No new events found")

    CACHE_FILE.write(cached_urls)

View file

@ -15,7 +15,7 @@ TAG = "TOTALSPRTK"
CACHE_FILE = Cache(TAG, exp=28_800)
BASE_URL = "https://live3.totalsportek777.com"
BASE_URL = "https://live3.totalsportekarmy.com"
def fix_txt(s: str) -> str:

View file

@ -1,13 +1,14 @@
## Base Log @ 2026-02-28 08:50 UTC
## Base Log @ 2026-03-01 08:53 UTC
### ✅ Working Streams: 157<br>❌ Dead Streams: 4
### ✅ Working Streams: 156<br>❌ Dead Streams: 5
| Channel | Error (Code) | Link |
| ------- | ------------ | ---- |
| BET | HTTP Error (404) | `http://212.102.60.231/BET/index.m3u8` |
| Disney XD | HTTP Error (000) | `http://hardcoremedia.xyz/live/rabdsbmz/3731346838/130092.ts` |
| Disney | HTTP Error (000) | `http://hardcoremedia.xyz/live/rabdsbmz/3731346838/257087.ts` |
| FYI TV | HTTP Error (000) | `http://hardcoremedia.xyz/live/rabdsbmz/3731346838/130105.ts` |
| Golf Channel | HTTP Error (000) | `http://hardcoremedia.xyz/live/rabdsbmz/3731346838/258721.ts` |
| NBC Sports NOW | HTTP Error (403) | `https://starshare.st/live/P4B9TB9xR8/humongous2tonight/1001.ts` |
---
#### Base Channels URL
```