From 00000d94a5914a85abd6d896dac964b8dfe10d98 Mon Sep 17 00:00:00 2001
From: doms9 <96013514+doms9@users.noreply.github.com>
Date: Tue, 19 Aug 2025 10:54:50 -0400
Subject: [PATCH] Cache tvpass results daily; update URLs and health checks
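
Replace the time-of-day gate in fetch.py with a timestamped cache in
tvpass.py that expires daily at 11:00 America/New_York. Point the base
playlist and the README M3U8/EPG links at spoo.me short URLs, send a
desktop browser User-Agent from health.sh, drop the redirect status
tracking, and remove the commented-out __main__ blocks.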
---
M3U8/fetch.py | 18 ++---
M3U8/scrape/fstv.py | 5 --
M3U8/scrape/tvpass.py | 160 ++++++++++++++++++++++++++----------------
health.sh | 33 +++++------
4 files changed, 115 insertions(+), 101 deletions(-)
diff --git a/M3U8/fetch.py b/M3U8/fetch.py
index 3271c00..dadc48c 100644
--- a/M3U8/fetch.py
+++ b/M3U8/fetch.py
@@ -1,17 +1,13 @@
#!/usr/bin/env python3
import json
-from datetime import datetime
from pathlib import Path
import httpx
-import pytz
from scrape import fstv, tvpass
m3u8_file = Path(__file__).parent / "TV.m3u8"
-base = "http://m3u4u.com/m3u/d5k2nvp8w2t3w2k1n984"
-
-current_hour = datetime.now(pytz.timezone("America/New_York")).hour
+base_url = "https://spoo.me/yBR2jV"
client = httpx.Client(
timeout=5,
@@ -26,10 +22,10 @@ def vanilla_fetch() -> tuple[list[str], int]:
print("Fetching base M3U8")
try:
- r = client.get(base)
+ r = client.get(base_url)
r.raise_for_status()
except Exception as e:
- raise SystemExit(f'Failed to fetch "{base}"\n{e}') from e
+ raise SystemExit(f'Failed to fetch "{base_url}"\n{e}') from e
d = r.text.splitlines()
@@ -41,13 +37,7 @@ def vanilla_fetch() -> tuple[list[str], int]:
def main() -> None:
- if current_hour <= 11:
- tvpass.main(client)
- else:
- try:
- tvpass.urls = json.loads(tvpass.base_file.read_text(encoding="utf-8"))
- except (FileNotFoundError, json.JSONDecodeError):
- pass
+ tvpass.main(client)
fstv.main(client)
diff --git a/M3U8/scrape/fstv.py b/M3U8/scrape/fstv.py
index 11ba9a4..3406f8d 100644
--- a/M3U8/scrape/fstv.py
+++ b/M3U8/scrape/fstv.py
@@ -104,8 +104,3 @@ def main(client: httpx.Client) -> None:
urls[key] = link
print(f"Collected {len(urls)} live events")
-
-
-# if __name__ == "__main__":
-# # create client beforehand
-# main()
diff --git a/M3U8/scrape/tvpass.py b/M3U8/scrape/tvpass.py
index 54b5dfc..fe40a2e 100644
--- a/M3U8/scrape/tvpass.py
+++ b/M3U8/scrape/tvpass.py
@@ -1,62 +1,98 @@
-import json
-import re
-from pathlib import Path
-from urllib.parse import urlparse
-
-import httpx
-
-base_url = "https://tvpass.org/playlist/m3u"
-base_file = Path(__file__).parent / "tvpass.json"
-
-urls: dict[str, str] = {}
-
-
-def fetch_m3u8(client: httpx.Client) -> list[str] | None:
- try:
- r = client.get(base_url)
- r.raise_for_status()
- except Exception as e:
- print(f'Failed to fetch "{base_url}"\n{e}')
-
- return r.text.splitlines()
-
-
-def main(client: httpx.Client) -> None:
- print(f'Scraping from "{base_url}"')
-
- if not (data := fetch_m3u8(client)):
- return
-
- for i in range(len(data) - 1):
- if data[i].startswith("#EXTINF"):
- tvg_id_match = re.search(r'tvg-id="([^"]*)"', data[i])
- tvg_name_match = re.search(r'tvg-name="([^"]*)"', data[i])
-
- tvg_id = tvg_id_match[1] if tvg_id_match else None
- tvg_name = tvg_name_match[1]
-
- if tvg_id == "":
- url = data[i + 1]
-
- tvg_name = tvg_name.split("(")[0].strip()
-
- if url.endswith("/sd"):
-
- path_parts = urlparse(url).path.strip("/").split("/")
-
- if len(path_parts) >= 2 and path_parts[-1] == "sd":
- sport = "".join(x for x in path_parts[1] if x.isalpha()).upper()
- else:
- sport = "UNKNWN"
-
- urls[f"[{sport}] {tvg_name}"] = url
-
- print(f"Collected {len(urls)} live events")
-
- if urls:
- base_file.write_text(json.dumps(urls, indent=2), encoding="utf-8")
-
-
-# if __name__ == "__main__":
-# # create client beforehand
-# main()
+import json
+import re
+from datetime import datetime, timedelta
+from pathlib import Path
+from urllib.parse import urlparse
+
+import httpx
+import pytz
+
+base_url = "https://tvpass.org/playlist/m3u"
+base_file = Path(__file__).parent / "tvpass.json"
+
+urls: dict[str, str] = {}
+
+TZ = pytz.timezone("America/New_York")
+
+
+def cache_expired(t: float) -> bool:
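+ # a timestamp is stale once it predates the most recent 11:00 ET boundary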
+ now = datetime.now(TZ)
+
+ boundary = now.replace(hour=11, minute=0, second=0, microsecond=0)
+
+ if now < boundary:
+ boundary -= timedelta(days=1)
+
+ return t < boundary.timestamp()
+
+
+def load_cache() -> dict[str, str]:
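+ # reuse the cached URL map unless it predates the last 11:00 ET refresh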
+ try:
+ data = json.loads(base_file.read_text(encoding="utf-8"))
+
+ ts = data.get("_timestamp", 0)
+
+ return {} if cache_expired(ts) else data.get("urls", {})
+ except (FileNotFoundError, json.JSONDecodeError):
+ return {}
+
+
+def save_cache(urls: dict[str, str]) -> None:
+ payload = {"_timestamp": datetime.now(TZ).timestamp(), "urls": urls}
+
+ base_file.write_text(json.dumps(payload, indent=2), encoding="utf-8")
+
+
+def fetch_m3u8(client: httpx.Client) -> list[str] | None:
+ try:
+ r = client.get(base_url)
+ r.raise_for_status()
+ except Exception as e:
+ print(f'Failed to fetch "{base_url}"\n{e}')
+ return None
+
+ return r.text.splitlines()
+
+
+def main(client: httpx.Client) -> None:
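+ # serve from the daily cache when it is still fresh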
+ if cached := load_cache():
+ urls.update(cached)
+ print(f"TVPass: Collected {len(urls)} live events from cache")
+ return
+
+ print(f'Scraping from "{base_url}"')
+
+ if not (data := fetch_m3u8(client)):
+ return
+
+ for i in range(len(data) - 1):
+ if data[i].startswith("#EXTINF"):
+ tvg_id_match = re.search(r'tvg-id="([^"]*)"', data[i])
+ tvg_name_match = re.search(r'tvg-name="([^"]*)"', data[i])
+
+ tvg_id = tvg_id_match[1] if tvg_id_match else None
+ tvg_name = tvg_name_match[1] if tvg_name_match else ""
+
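+ # only entries with an empty tvg-id (the live-event streams) are kept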
+ if tvg_id == "":
+ url = data[i + 1]
+
+ tvg_name = tvg_name.split("(")[0].strip()
+
+ if url.endswith("/sd"):
+
+ path_parts = urlparse(url).path.strip("/").split("/")
+
+ if len(path_parts) >= 2 and path_parts[-1] == "sd":
+ sport = "".join(x for x in path_parts[1] if x.isalpha()).upper()
+ else:
+ sport = "UNKNWN"
+
+ urls[f"[{sport}] {tvg_name}"] = url
+
+ if urls:
+ save_cache(urls)
+ print(f"Cached {len(urls)} live events")
diff --git a/health.sh b/health.sh
index e3faeff..d6a5334 100644
--- a/health.sh
+++ b/health.sh
@@ -1,12 +1,12 @@
#!/bin/bash
-main="http://m3u4u.com/m3u/d5k2nvp8w2t3w2k1n984"
+base_url="https://spoo.me/yBR2jV"
+UA="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36 Edg/134.0.0.0"
MAX_JOBS=10
RETRY_COUNT=3
README="./readme.md"
STATUSLOG=$(mktemp)
PASSED=0
FAILED=0
-REDIRECTED=0
EMPTY=0
get_status() {
@@ -17,7 +17,7 @@ get_status() {
[[ "$url" != http* ]] && return
for attempt in $(seq 1 "$RETRY_COUNT"); do
- response=$(curl -sL -o /dev/null --max-time 10 -w "%{http_code}" "$url" 2>&1)
+ response=$(curl -sL -A "$UA" -o /dev/null --max-time 10 -w "%{http_code}" "$url" 2>&1)
[[ "$response" =~ ^[0-9]+$ ]] && break
sleep 1
done
@@ -36,18 +36,13 @@ get_status() {
case "$status_code" in
200)
- if ! curl -sL --max-time 5 "$url" | head -c 1 | grep -q '.'; then
+ if ! curl -sL -A "$UA" --max-time 5 "$url" | head -c 1 | grep -q '.'; then
echo "| $channel | Empty body (404) | \`$url\` |" >>"$STATUSLOG"
echo "EMPTY" >>"$STATUSLOG"
else
echo "PASS" >>"$STATUSLOG"
fi
;;
- 301 | 302 | 307 | 308)
- redirect_url=$(curl -sI --max-time 5 "$url" | grep -i '^Location:' | sed 's/Location: //I' | tr -d '\r\n')
- echo "| $channel | Redirect ($status_code) | \`$url → $redirect_url\` |" >>"$STATUSLOG"
- echo "REDIRECT" >>"$STATUSLOG"
- ;;
4* | 5*)
echo "| $channel | HTTP Error ($status_code) | \`$url\` |" >>"$STATUSLOG"
echo "FAIL" >>"$STATUSLOG"
@@ -64,7 +59,7 @@ get_status() {
}
check_links() {
- echo "Checking links from: $main"
+ echo "Checking links from: $base_url"
channel_num=0
name=""
jobs_running=0
@@ -83,7 +78,7 @@ check_links() {
get_status "$line" "$name" &
((channel_num++))
fi
- done < <(curl -sL "$main")
+ done < <(curl -sL -A "$UA" "$base_url")
wait
echo "Done."
@@ -92,27 +87,25 @@ check_links() {
write_readme() {
- local passed redirected empty failed
+ local passed empty failed
passed=$(grep -c '^PASS$' "$STATUSLOG")
- redirected=$(grep -c '^REDIRECT$' "$STATUSLOG")
empty=$(grep -c '^EMPTY$' "$STATUSLOG")
failed=$(grep -c '^FAIL$' "$STATUSLOG")
{
- echo "## Log @ $(date '+%Y-%m-%d %H:%M:%S UTC')"
+ echo "## Log @ $(date '+%Y-%m-%d %H:%M UTC')"
echo
- echo "### ✅ Working Streams: $passed
🔁 Redirected Links: $redirected
➖ Empty Streams: $empty
❌ Dead Streams: $failed"
+ echo "### ✅ Working Streams: $passed
➖ Empty Streams: $empty
❌ Dead Streams: $failed"
echo
- if [ $failed -gt 0 ] || [ $empty -gt 0 ] || [ $redirected -gt 0 ]; then
+ if (($failed > 0 || $empty > 0)); then
head -n 1 "$STATUSLOG"
- grep -v -e '^PASS$' -e '^FAIL$' -e '^EMPTY$' -e '^REDIRECT$' -e '^---' "$STATUSLOG" |
- grep -v '^| Channel' | sort -u
+ grep -v -e '^PASS$' -e '^FAIL$' -e '^EMPTY$' -e '^---' "$STATUSLOG" | grep -v '^| Channel' | sort -u
fi
echo "---"
echo "#### M3U8 URL"
- printf "\`\`\`\nhttps://raw.githubusercontent.com/doms9/iptv/refs/heads/default/M3U8/TV.m3u8\n\`\`\`\n"
+ printf "\`\`\`\nhttps://spoo.me/d9M3U8\n\`\`\`\n"
echo "#### EPG URL"
- printf "\`\`\`\nhttps://raw.githubusercontent.com/doms9/iptv/refs/heads/default/EPG/TV.xml\n\`\`\`\n"
+ printf "\`\`\`\nhttps://spoo.me/d9EPG\n\`\`\`\n"
echo "---"
echo "#### Legal Disclaimer"
echo "This repository lists publicly accessible IPTV streams as found on the internet at the time of checking."