Mirror of https://github.com/doms9/iptv.git, synced 2026-04-22 19:57:00 +02:00
e

- edit caching method
- misc edits
parent b4dc04ad58
commit 00000d90e4

4 changed files with 82 additions and 66 deletions
File 1 of 4:

```diff
@@ -92,9 +92,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape(browser: Browser) -> None:
     cached_urls = CACHE_FILE.load()
 
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
 
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+
+    urls.update(valid_urls)
 
     log.info(f"Loaded {cached_count} event(s) from cache")
 
@@ -121,7 +123,6 @@ async def scrape(browser: Browser) -> None:
                 log=log,
             )
 
-            if url:
             sport, event, ts = (
                 ev["sport"],
                 ev["event"],
@@ -141,9 +142,14 @@ async def scrape(browser: Browser) -> None:
                 "link": link,
             }
 
-            urls[key] = cached_urls[key] = entry
+            cached_urls[key] = entry
 
-        log.info(f"Collected and cached {len(cached_urls) - cached_count} new event(s)")
+            if url:
+                valid_count += 1
+
+                urls[key] = entry
+
+        log.info(f"Collected and cached {valid_count - cached_count} new event(s)")
 
     else:
         log.info("No new events found")
```
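The net effect of these hunks: every scraped event is now written back to the cache, even when no URL was captured, while only entries with a non-empty `"url"` are loaded, counted, and published. Below is a minimal runnable sketch of that flow; `cache`, `urls`, and the sample keys are hypothetical stand-ins for the repo's real `CACHE_FILE`, `get_events`, and `process_event` plumbing.

```python
import asyncio

# Hypothetical stand-ins for the repo's CACHE_FILE contents and shared
# `urls` mapping; the keys and URLs below are illustrative only.
cache: dict[str, dict[str, str]] = {
    "[NBA] Event A (TAG)": {"url": "https://example.com/a.m3u8"},
    "[NHL] Event B (TAG)": {"url": ""},  # cached attempt that captured no URL
}
urls: dict[str, dict[str, str]] = {}


async def scrape() -> None:
    cached_urls = dict(cache)

    # Only cached entries that actually captured a URL count as valid.
    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}

    valid_count = cached_count = len(valid_urls)

    urls.update(valid_urls)
    print(f"Loaded {cached_count} event(s) from cache")

    # Pretend one newly scraped event captured a URL.
    for key, url in [("[MLB] Event C (TAG)", "https://example.com/c.m3u8")]:
        entry = {"url": url}
        cached_urls[key] = entry  # cache every attempt, captured or not

        if url:  # ...but count and publish only real captures
            valid_count += 1
            urls[key] = entry

    print(f"Collected and cached {valid_count - cached_count} new event(s)")


asyncio.run(scrape())
```

Since `get_events` takes the cached keys, an attempt that failed to capture a URL is presumably not re-scraped on the next run, yet it never reaches the published `urls` mapping.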
File 2 of 4 (the same caching change in a second scraper):

```diff
@@ -79,9 +79,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape(browser: Browser) -> None:
     cached_urls = CACHE_FILE.load()
 
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
 
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+
+    urls.update(valid_urls)
 
     log.info(f"Loaded {cached_count} event(s) from cache")
 
@@ -108,7 +110,6 @@ async def scrape(browser: Browser) -> None:
                 log=log,
             )
 
-            if url:
             sport, event, ts = (
                 ev["sport"],
                 ev["event"],
@@ -128,9 +129,14 @@ async def scrape(browser: Browser) -> None:
                 "link": link,
             }
 
-            urls[key] = cached_urls[key] = entry
+            cached_urls[key] = entry
 
-        log.info(f"Collected and cached {len(cached_urls) - cached_count} new event(s)")
+            if url:
+                valid_count += 1
+
+                urls[key] = entry
+
+        log.info(f"Collected and cached {valid_count - cached_count} new event(s)")
 
     else:
         log.info("No new events found")
```
File 3 of 4 (the same caching change in a third scraper):

```diff
@@ -80,9 +80,11 @@ async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
 async def scrape(browser: Browser) -> None:
     cached_urls = CACHE_FILE.load()
 
-    cached_count = len(cached_urls)
+    valid_urls = {k: v for k, v in cached_urls.items() if v["url"]}
 
-    urls.update(cached_urls)
+    valid_count = cached_count = len(valid_urls)
+
+    urls.update(valid_urls)
 
     log.info(f"Loaded {cached_count} event(s) from cache")
 
@@ -111,7 +113,6 @@ async def scrape(browser: Browser) -> None:
                 log=log,
             )
 
-            if url:
             sport, event = ev["sport"], ev["event"]
 
             key = f"[{sport}] {event} ({TAG})"
@@ -127,9 +128,14 @@ async def scrape(browser: Browser) -> None:
                 "link": link,
             }
 
-            urls[key] = cached_urls[key] = entry
+            cached_urls[key] = entry
 
-        log.info(f"Collected and cached {len(cached_urls) - cached_count} new event(s)")
+            if url:
+                valid_count += 1
+
+                urls[key] = entry
+
+        log.info(f"Collected and cached {valid_count - cached_count} new event(s)")
 
     else:
         log.info("No new events found")
```
File 4 of 4 (the M3U8 decoding in process_event):

```diff
@@ -39,19 +39,17 @@ async def process_event(url: str, url_num: int) -> str | None:
 
     embed_list: list[tuple[int, str]] = ast.literal_eval(embed_list_str)
 
-    embed_list.sort(key=lambda i: i[0])
-
     m3u8 = "".join(
         chr(
             int("".join(c for c in base64.b64decode(v).decode("utf-8") if c.isdigit()))
             - sum(map(int, digit_list))
         )
-        for _, v in embed_list
+        for _, v in sorted(embed_list, key=lambda i: i[0])
    )
 
     log.info(f"URL {url_num}) Captured M3U8")
 
-    return m3u8.split("&ip")[0]
+    return m3u8.split("ip=")[0]
 
 
 async def get_events(cached_keys: list[str]) -> list[dict[str, str]]:
```
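For context, the generator this hunk edits rebuilds an M3U8 URL character by character: each (position, value) pair is base64-decoded to a number, the sum of `digit_list` is subtracted, and the result is treated as a code point. A self-contained sketch under made-up sample data (in the repo, `digit_list` and `embed_list_str` come from the scraped player page, not from these literals):

```python
import ast
import base64

# Made-up inputs; the real values are extracted from the embedded player.
digit_list = ["1", "2", "3"]
offset = sum(map(int, digit_list))  # 6

# Encode a sample URL the way the site appears to: each character's code
# point plus the offset, base64-encoded, stored with its position (shuffled).
plain = "https://example.com/stream.m3u8?foo=bar&ip=1.2.3.4"
embed_list_str = str(
    [(i, base64.b64encode(str(ord(c) + offset).encode()).decode())
     for i, c in enumerate(plain)][::-1]
)

embed_list: list[tuple[int, str]] = ast.literal_eval(embed_list_str)

# Decode: order the pairs lazily (the new sorted(...) form), keep only the
# digits of each decoded value, undo the offset, map back to characters.
m3u8 = "".join(
    chr(
        int("".join(c for c in base64.b64decode(v).decode("utf-8") if c.isdigit()))
        - offset
    )
    for _, v in sorted(embed_list, key=lambda i: i[0])
)

print(m3u8.split("ip=")[0])  # https://example.com/stream.m3u8?foo=bar&
```

Using `sorted()` leaves `embed_list` untouched and keeps the whole decode in one expression; switching the split from `"&ip"` to `"ip="` presumably also handles URLs where the IP parameter comes first (`?ip=...`) rather than after another parameter (`&ip=...`).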