StreamingCommunity-2.6.0-py3-none-any.whl → StreamingCommunity-2.7.0-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of StreamingCommunity might be problematic.

Files changed (47)
  1. StreamingCommunity/Api/Player/Helper/Vixcloud/util.py +26 -2
  2. StreamingCommunity/Api/Player/ddl.py +2 -2
  3. StreamingCommunity/Api/Player/maxstream.py +3 -3
  4. StreamingCommunity/Api/Player/supervideo.py +2 -2
  5. StreamingCommunity/Api/Player/vixcloud.py +16 -18
  6. StreamingCommunity/Api/Site/1337xx/site.py +11 -4
  7. StreamingCommunity/Api/Site/1337xx/title.py +3 -4
  8. StreamingCommunity/Api/Site/animeunity/film_serie.py +5 -4
  9. StreamingCommunity/Api/Site/animeunity/site.py +9 -3
  10. StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +8 -9
  11. StreamingCommunity/Api/Site/cb01new/site.py +12 -4
  12. StreamingCommunity/Api/Site/ddlstreamitaly/site.py +10 -4
  13. StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +2 -2
  14. StreamingCommunity/Api/Site/guardaserie/site.py +17 -11
  15. StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +4 -3
  16. StreamingCommunity/Api/Site/mostraguarda/film.py +11 -6
  17. StreamingCommunity/Api/Site/streamingcommunity/__init__.py +3 -5
  18. StreamingCommunity/Api/Site/streamingcommunity/film.py +1 -1
  19. StreamingCommunity/Api/Site/streamingcommunity/series.py +6 -7
  20. StreamingCommunity/Api/Site/streamingcommunity/site.py +14 -63
  21. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +9 -24
  22. StreamingCommunity/Api/Template/Util/get_domain.py +11 -72
  23. StreamingCommunity/Api/Template/Util/manage_ep.py +28 -23
  24. StreamingCommunity/Api/Template/config_loader.py +6 -2
  25. StreamingCommunity/Lib/Downloader/HLS/downloader.py +2 -2
  26. StreamingCommunity/Lib/Downloader/HLS/proxyes.py +2 -2
  27. StreamingCommunity/Lib/Downloader/HLS/segments.py +5 -5
  28. StreamingCommunity/Lib/Downloader/MP4/downloader.py +2 -2
  29. StreamingCommunity/Upload/update.py +3 -3
  30. StreamingCommunity/Upload/version.py +1 -1
  31. StreamingCommunity/Util/_jsonConfig.py +209 -96
  32. StreamingCommunity/Util/headers.py +8 -1
  33. StreamingCommunity/Util/table.py +6 -6
  34. StreamingCommunity/run.py +13 -9
  35. {StreamingCommunity-2.6.0.dist-info → StreamingCommunity-2.7.0.dist-info}/METADATA +39 -24
  36. {StreamingCommunity-2.6.0.dist-info → StreamingCommunity-2.7.0.dist-info}/RECORD +40 -47
  37. StreamingCommunity/Api/Site/altadefinizionegratis/__init__.py +0 -76
  38. StreamingCommunity/Api/Site/altadefinizionegratis/film.py +0 -76
  39. StreamingCommunity/Api/Site/altadefinizionegratis/site.py +0 -109
  40. StreamingCommunity/Api/Site/ilcorsaronero/__init__.py +0 -53
  41. StreamingCommunity/Api/Site/ilcorsaronero/site.py +0 -64
  42. StreamingCommunity/Api/Site/ilcorsaronero/title.py +0 -42
  43. StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +0 -149
  44. {StreamingCommunity-2.6.0.dist-info → StreamingCommunity-2.7.0.dist-info}/LICENSE +0 -0
  45. {StreamingCommunity-2.6.0.dist-info → StreamingCommunity-2.7.0.dist-info}/WHEEL +0 -0
  46. {StreamingCommunity-2.6.0.dist-info → StreamingCommunity-2.7.0.dist-info}/entry_points.txt +0 -0
  47. {StreamingCommunity-2.6.0.dist-info → StreamingCommunity-2.7.0.dist-info}/top_level.txt +0 -0
@@ -1,149 +0,0 @@
- # 12.14.24
-
- import logging
- import asyncio
- from typing import List, Dict, Optional
-
-
- # External libraries
- import httpx
- from bs4 import BeautifulSoup
-
-
- # Internal utilities
- from StreamingCommunity.Util._jsonConfig import config_manager
- from StreamingCommunity.Util.headers import get_headers
- from StreamingCommunity.Util.console import console
-
-
- # Variable
- max_timeout = config_manager.get_int("REQUESTS", "timeout")
-
-
- class IlCorsaroNeroScraper:
-     def __init__(self, base_url: str, max_page: int = 1):
-         self.base_url = base_url
-         self.max_page = max_page
-         self.headers = {
-             'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
-             'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
-             'cache-control': 'max-age=0',
-             'priority': 'u=0, i',
-             'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
-             'sec-ch-ua-mobile': '?0',
-             'sec-ch-ua-platform': '"Windows"',
-             'sec-fetch-dest': 'document',
-             'sec-fetch-mode': 'navigate',
-             'sec-fetch-site': 'same-origin',
-             'sec-fetch-user': '?1',
-             'upgrade-insecure-requests': '1',
-             'user-agent': get_headers()
-         }
-
-     async def fetch_url(self, url: str) -> Optional[str]:
-         """
-         Fetch the HTML content of a given URL.
-         """
-         try:
-             console.print(f"[cyan]Fetching url[white]: [red]{url}")
-             async with httpx.AsyncClient(headers=self.headers, follow_redirects=True, timeout=max_timeout) as client:
-                 response = await client.get(url)
-
-                 # If the request was successful, return the HTML content
-                 response.raise_for_status()
-                 return response.text
-
-         except Exception as e:
-             logging.error(f"Error fetching from {url}: {e}")
-             return None
-
-     def parse_torrents(self, html: str) -> List[Dict[str, str]]:
-         """
-         Parse the HTML content and extract torrent details.
-         """
-         torrents = []
-         soup = BeautifulSoup(html, "html.parser")
-         table = soup.find("tbody")
-
-         for row in table.find_all("tr"):
-             try:
-                 columns = row.find_all("td")
-
-                 torrents.append({
-                     'type': columns[0].get_text(strip=True),
-                     'name': row.find("th").find("a").get_text(strip=True),
-                     'seed': columns[1].get_text(strip=True),
-                     'leech': columns[2].get_text(strip=True),
-                     'size': columns[3].get_text(strip=True),
-                     'date': columns[4].get_text(strip=True),
-                     'url': "https://ilcorsaronero.link" + row.find("th").find("a").get("href")
-                 })
-
-             except Exception as e:
-                 logging.error(f"Error parsing row: {e}")
-                 continue
-
-         return torrents
-
-     async def fetch_real_url(self, url: str) -> Optional[str]:
-         """
-         Fetch the real torrent URL from the detailed page.
-         """
-         response_html = await self.fetch_url(url)
-         if not response_html:
-             return None
-
-         soup = BeautifulSoup(response_html, "html.parser")
-         links = soup.find_all("a")
-
-         # Find and return the magnet link
-         for link in links:
-             if "magnet" in str(link):
-                 return link.get("href")
-
-         return None
-
-     async def search(self, query: str) -> List[Dict[str, str]]:
-         """
-         Search for torrents based on the query string.
-         """
-         all_torrents = []
-
-         # Loop through each page
-         for page in range(self.max_page):
-             url = f'{self.base_url}search?q={query}&page={page}'
-
-             html = await self.fetch_url(url)
-             if not html:
-                 console.print(f"[bold red]No HTML content for page {page}[/bold red]")
-                 break
-
-             torrents = self.parse_torrents(html)
-             if not torrents:
-                 console.print(f"[bold red]No torrents found on page {page}[/bold red]")
-                 break
-
-             # Use asyncio.gather to fetch all real URLs concurrently
-             tasks = [self.fetch_real_url(result['url']) for result in torrents]
-             real_urls = await asyncio.gather(*tasks)
-
-             # Attach real URLs to the torrent data
-             for i, result in enumerate(torrents):
-                 result['url'] = real_urls[i]
-
-             all_torrents.extend(torrents)
-
-         return all_torrents
-
- async def main():
-     scraper = IlCorsaroNeroScraper("https://ilcorsaronero.link/")
-     results = await scraper.search("cars")
-
-     if results:
-         for i, torrent in enumerate(results):
-             console.print(f"[bold green]{i} = {torrent}[/bold green] \n")
-     else:
-         console.print("[bold red]No torrents found.[/bold red]")
-
- if __name__ == '__main__':
-     asyncio.run(main())
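
Note on the removed code above: `search()` resolves every result's detail page with a bare `asyncio.gather`, issuing one request per torrent simultaneously with no upper bound. For readers reusing the pattern, below is a minimal sketch of the same magnet-resolution step with a concurrency cap. This is not part of the package: `fetch_magnet`, `resolve_all`, and the semaphore limit are hypothetical names and choices.

import asyncio
from typing import List, Optional

import httpx


async def fetch_magnet(client: httpx.AsyncClient, url: str,
                       sem: asyncio.Semaphore) -> Optional[str]:
    # Cap concurrent detail-page requests; the removed fetch_real_url()
    # gathered every request at once with no upper bound.
    async with sem:
        try:
            response = await client.get(url, follow_redirects=True)
            response.raise_for_status()
        except httpx.HTTPError:
            return None
    # Naive scan for a quoted magnet URI, standing in for the removed
    # code's "magnet" substring check over every <a> tag.
    for token in response.text.split('"'):
        if token.startswith("magnet:"):
            return token
    return None


async def resolve_all(urls: List[str], limit: int = 5) -> List[Optional[str]]:
    # Same fan-out as the removed asyncio.gather call, but at most
    # `limit` requests are in flight at any moment.
    sem = asyncio.Semaphore(limit)
    async with httpx.AsyncClient(timeout=15) as client:
        return await asyncio.gather(*(fetch_magnet(client, u, sem) for u in urls))

A caller in the position of `search()` would swap `asyncio.gather(*tasks)` for `await resolve_all([r['url'] for r in torrents])`, trading a little latency for not hammering the site when a results page is large.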