StreamingCommunity-3.0.1-py3-none-any.whl → StreamingCommunity-3.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of StreamingCommunity might be problematic.

Files changed (29)
  1. StreamingCommunity/Api/Player/hdplayer.py +65 -0
  2. StreamingCommunity/Api/Player/mixdrop.py +145 -0
  3. StreamingCommunity/Api/Site/1337xx/site.py +1 -1
  4. StreamingCommunity/Api/Site/altadefinizione/site.py +1 -1
  5. StreamingCommunity/Api/Site/animeunity/site.py +2 -1
  6. StreamingCommunity/Api/Site/animeworld/site.py +7 -3
  7. StreamingCommunity/Api/Site/ddlstreamitaly/site.py +1 -1
  8. StreamingCommunity/Api/Site/guardaserie/site.py +1 -1
  9. StreamingCommunity/Api/Site/raiplay/site.py +2 -2
  10. StreamingCommunity/Api/Site/streamingcommunity/series.py +2 -2
  11. StreamingCommunity/Api/Site/streamingcommunity/site.py +1 -1
  12. StreamingCommunity/Api/Site/streamingwatch/__init__.py +95 -0
  13. StreamingCommunity/Api/Site/streamingwatch/film.py +61 -0
  14. StreamingCommunity/Api/Site/streamingwatch/series.py +160 -0
  15. StreamingCommunity/Api/Site/streamingwatch/site.py +111 -0
  16. StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +118 -0
  17. StreamingCommunity/Lib/Proxies/proxy.py +232 -0
  18. StreamingCommunity/Upload/update.py +23 -22
  19. StreamingCommunity/Upload/version.py +1 -1
  20. StreamingCommunity/Util/os.py +38 -0
  21. StreamingCommunity/global_search.py +16 -4
  22. StreamingCommunity/run.py +11 -2
  23. {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.3.dist-info}/METADATA +17 -2
  24. {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.3.dist-info}/RECORD +28 -21
  25. {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.3.dist-info}/WHEEL +1 -1
  26. StreamingCommunity/Api/Player/maxstream.py +0 -140
  27. {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.3.dist-info}/entry_points.txt +0 -0
  28. {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.3.dist-info}/licenses/LICENSE +0 -0
  29. {streamingcommunity-3.0.1.dist-info → streamingcommunity-3.0.3.dist-info}/top_level.txt +0 -0
StreamingCommunity/Api/Site/streamingwatch/site.py ADDED
@@ -0,0 +1,111 @@
+ # 29.04.25
+
+ import re
+
+
+ # External libraries
+ import httpx
+ from bs4 import BeautifulSoup
+ from rich.console import Console
+
+
+ # Internal utilities
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Util.headers import get_userAgent
+ from StreamingCommunity.Util.table import TVShowManager
+
+
+ # Logic class
+ from StreamingCommunity.Api.Template.config_loader import site_constant
+ from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
+
+
+ # Variable
+ console = Console()
+ media_search_manager = MediaManager()
+ table_show_manager = TVShowManager()
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+ def extract_nonce(response_) -> str:
+     """Extract nonce value from the page script"""
+     soup = BeautifulSoup(response_.content, 'html.parser')
+     script = soup.find('script', id='live-search-js-extra')
+     if script:
+         match = re.search(r'"admin_ajax_nonce":"([^"]+)"', script.text)
+         if match:
+             return match.group(1)
+     return ""
+
+
+ def title_search(query: str, additionalData: list) -> int:
+     """
+     Search for titles based on a search query.
+
+     Parameters:
+         - query (str): The query to search for.
+
+     Returns:
+         int: The number of titles found.
+     """
+     media_search_manager.clear()
+     table_show_manager.clear()
+
+     proxy, response_serie = additionalData
+     search_url = f"{site_constant.FULL_URL}/wp-admin/admin-ajax.php"
+     console.print(f"[cyan]Search url: [yellow]{search_url}")
+
+     try:
+         _wpnonce = extract_nonce(response_serie)
+
+         if not _wpnonce:
+             console.print("[red]Error: Failed to extract nonce")
+             return 0
+
+         data = {
+             'action': 'data_fetch',
+             'keyword': query,
+             '_wpnonce': _wpnonce
+         }
+
+         response = httpx.post(
+             search_url,
+             headers={
+                 'origin': site_constant.FULL_URL,
+                 'user-agent': get_userAgent()
+             },
+             data=data,
+             timeout=max_timeout,
+             proxy=proxy
+         )
+         response.raise_for_status()
+         soup = BeautifulSoup(response.text, 'html.parser')
+
+     except Exception as e:
+         console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
+         return 0
+
+     for item in soup.find_all('div', class_='searchelement'):
+         try:
+
+             title = item.find_all("a")[-1].get_text(strip=True) if item.find_all("a") else 'N/A'
+             url = item.find('a').get('href', '')
+             year = item.find('div', id='search-cat-year')
+             year = year.get_text(strip=True) if year else 'N/A'
+
+             if any(keyword in year.lower() for keyword in ['stagione', 'episodio', 'ep.', 'season', 'episode']):
+                 continue
+
+             media_search_manager.add_media({
+                 'name': title,
+                 'type': 'tv' if '/serie/' in url else 'Film',
+                 'date': year,
+                 'image': item.find('img').get('src', ''),
+                 'url': url
+             })
+
+         except Exception as e:
+             print(f"Error parsing a film entry: {e}")
+
+     # Return the number of titles found
+     return media_search_manager.get_length()
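
A minimal sketch of how this search module is driven, assuming the caller (presumably the streamingwatch __init__.py added in this release, not shown in this hunk) supplies a proxy string and a previously fetched site page from which the nonce can be extracted; the URL and query below are placeholders, not values from the package:

    import httpx
    from StreamingCommunity.Api.Site.streamingwatch.site import title_search

    proxy = None                                                             # or an "http://ip:port" string from ProxyFinder
    series_page = httpx.get("https://example.invalid/serie/", timeout=10)    # placeholder page, used only for nonce extraction
    found = title_search("dune", [proxy, series_page])                       # additionalData = [proxy, response_serie]
    print(f"{found} titles found")
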
StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py ADDED
@@ -0,0 +1,118 @@
+ # 29.04.25
+
+ import re
+ import logging
+
+
+ # External libraries
+ import httpx
+ from bs4 import BeautifulSoup
+
+ # Internal utilities
+ from StreamingCommunity.Util.headers import get_userAgent
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Api.Player.Helper.Vixcloud.util import SeasonManager, Episode
+
+
+ # Variable
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+ class GetSerieInfo:
+     def __init__(self, url, proxy: str = None):
+         self.headers = {'user-agent': get_userAgent()}
+         self.url = url
+         self.seasons_manager = SeasonManager()
+         self.series_name = None
+
+         self.client = httpx.Client(headers=self.headers, proxy=proxy, timeout=max_timeout)
+
+     def collect_info_season(self) -> None:
+         """
+         Retrieve all series information including episodes and seasons.
+         """
+         try:
+             response = self.client.get(self.url)
+             response.raise_for_status()
+             soup = BeautifulSoup(response.text, 'html.parser')
+
+             if not self.series_name:
+                 title_tag = soup.find('h1', class_='title-border')
+                 self.series_name = title_tag.get_text(strip=True) if title_tag else 'N/A'
+
+             # Extract episodes and organize by season
+             episodes = {}
+             for ep in soup.find_all('div', class_='bolumust'):
+                 a_tag = ep.find('a')
+                 if not a_tag:
+                     continue
+
+                 ep_url = a_tag.get('href', '')
+                 episode_title = a_tag.get_text(strip=True)
+
+                 # Clean up episode title by removing season info and date
+                 clean_title = re.sub(r'Stagione \d+ Episodio \d+\s*\(?([^)]+)\)?\s*\d+\s*\w+\s*\d+', r'\1', episode_title)
+
+                 season_match = re.search(r'stagione-(\d+)', ep_url)
+                 if season_match:
+                     season_num = int(season_match.group(1))
+                     if season_num not in episodes:
+                         episodes[season_num] = []
+
+                     episodes[season_num].append({
+                         'id': len(episodes[season_num]) + 1,
+                         'number': len(episodes[season_num]) + 1,
+                         'name': clean_title.strip(),
+                         'url': ep_url
+                     })
+
+             # Add seasons to SeasonManager
+             for season_num, eps in episodes.items():
+                 season = self.seasons_manager.add_season({
+                     'id': season_num,
+                     'number': season_num,
+                     'name': f'Stagione {season_num}'
+                 })
+
+                 # Add episodes to season's EpisodeManager
+                 for ep in eps:
+                     season.episodes.add(ep)
+
+         except Exception as e:
+             logging.error(f"Error collecting series info: {str(e)}")
+             raise
+
+     # ------------- FOR GUI -------------
+     def getNumberSeason(self) -> int:
+         """
+         Get the total number of seasons available for the series.
+         """
+         if not self.seasons_manager.seasons:
+             self.collect_info_season()
+
+         return len(self.seasons_manager.seasons)
+
+     def getEpisodeSeasons(self, season_number: int) -> list:
+         """
+         Get all episodes for a specific season.
+         """
+         if not self.seasons_manager.seasons:
+             self.collect_info_season()
+
+         season = self.seasons_manager.get_season_by_number(season_number)
+         if not season:
+             logging.error(f"Season {season_number} not found")
+             return []
+
+         return season.episodes.episodes
+
+     def selectEpisode(self, season_number: int, episode_index: int) -> Episode:
+         """
+         Get information for a specific episode in a specific season.
+         """
+         episodes = self.getEpisodeSeasons(season_number)
+         if not episodes or episode_index < 0 or episode_index >= len(episodes):
+             logging.error(f"Episode index {episode_index} is out of range for season {season_number}")
+             return None
+
+         return episodes[episode_index]
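
A hedged sketch of how the GUI helpers above chain together (illustrative only; the real consumer is streamingwatch/series.py, which this diff does not show, and the URL is a placeholder). Seasons are addressed by number while selectEpisode takes a zero-based index, per the bounds check above:

    from StreamingCommunity.Api.Site.streamingwatch.util.ScrapeSerie import GetSerieInfo

    scraper = GetSerieInfo("https://example.invalid/serie/some-show/", proxy=None)   # placeholder URL
    for season_number in range(1, scraper.getNumberSeason() + 1):                    # assumes consecutive season numbering
        for index, _ in enumerate(scraper.getEpisodeSeasons(season_number)):
            episode = scraper.selectEpisode(season_number, index)
            print(season_number, episode)
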
StreamingCommunity/Lib/Proxies/proxy.py ADDED
@@ -0,0 +1,232 @@
+ # 29.04.25
+
+ import os
+ import sys
+ import time
+ import json
+ import signal
+ import warnings
+ warnings.filterwarnings("ignore", category=UserWarning)
+ from datetime import datetime, timedelta
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+
+
+ # External library
+ import httpx
+ from rich import print
+ from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn, TimeRemainingColumn
+
+
+ # Internal utilities
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Util.headers import get_headers
+
+
+ # Variable
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
+
+
+ class ProxyFinder:
+     def __init__(self, url, timeout_threshold: float = 7.0, max_proxies: int = 150, max_workers: int = 12):
+         self.url = url
+         self.timeout_threshold = timeout_threshold
+         self.max_proxies = max_proxies
+         self.max_workers = max_workers
+         self.found_proxy = None
+         self.shutdown_flag = False
+         self.json_file = os.path.join(os.path.dirname(__file__), 'working_proxies.json')
+         signal.signal(signal.SIGINT, self._handle_interrupt)
+
+     def load_saved_proxies(self) -> tuple:
+         """Load saved proxies if they're not expired (2 hours old)"""
+         try:
+             if not os.path.exists(self.json_file):
+                 return None, None
+
+             with open(self.json_file, 'r') as f:
+                 data = json.load(f)
+
+             if not data.get('proxies') or not data.get('last_update'):
+                 return None, None
+
+             last_update = datetime.fromisoformat(data['last_update'])
+             if datetime.now() - last_update > timedelta(hours=2):
+                 return None, None
+
+             return data['proxies'], last_update
+         except Exception:
+             return None, None
+
+     def save_working_proxy(self, proxy: str, response_time: float):
+         """Save working proxy to JSON file"""
+         data = {
+             'proxies': [{'proxy': proxy, 'response_time': response_time}],
+             'last_update': datetime.now().isoformat()
+         }
+         try:
+             with open(self.json_file, 'w') as f:
+                 json.dump(data, f, indent=4)
+         except Exception as e:
+             print(f"[bold red]Error saving proxy:[/bold red] {str(e)}")
+
+     def fetch_geonode(self) -> list:
+         proxies = []
+         try:
+             response = httpx.get(
+                 "https://proxylist.geonode.com/api/proxy-list?protocols=http%2Chttps&limit=100&page=1&sort_by=speed&sort_type=asc",
+                 headers=get_headers(),
+                 timeout=MAX_TIMEOUT
+             )
+             data = response.json()
+             proxies = [(f"http://{p['ip']}:{p['port']}", "Geonode") for p in data.get('data', [])]
+
+         except Exception as e:
+             print(f"[bold red]Error in Geonode:[/bold red] {str(e)[:100]}")
+
+         return proxies
+
+     def fetch_proxyscrape(self) -> list:
+         proxies = []
+         try:
+             response = httpx.get(
+                 "https://api.proxyscrape.com/v4/free-proxy-list/get?request=get_proxies&protocol=http&skip=0&proxy_format=protocolipport&format=json&limit=100&timeout=1000",
+                 headers=get_headers(),
+                 timeout=MAX_TIMEOUT
+             )
+             data = response.json()
+             if 'proxies' in data and isinstance(data['proxies'], list):
+                 proxies = [(proxy_data['proxy'], "ProxyScrape") for proxy_data in data['proxies'] if 'proxy' in proxy_data]
+
+         except Exception as e:
+             print(f"[bold red]Error in ProxyScrape:[/bold red] {str(e)[:100]}")
+
+         return proxies
+
+     def fetch_proxies_from_sources(self) -> list:
+         #print("[cyan]Fetching proxies from sources...[/cyan]")
+         with ThreadPoolExecutor(max_workers=3) as executor:
+             proxyscrape_future = executor.submit(self.fetch_proxyscrape)
+             geonode_future = executor.submit(self.fetch_geonode)
+
+             sources_proxies = {}
+
+             try:
+                 proxyscrape_result = proxyscrape_future.result()
+                 sources_proxies["proxyscrape"] = proxyscrape_result[:int(self.max_proxies/2)]
+             except Exception as e:
+                 print(f"[bold red]Error fetching from proxyscrape:[/bold red] {str(e)[:100]}")
+                 sources_proxies["proxyscrape"] = []
+
+             try:
+                 geonode_result = geonode_future.result()
+                 sources_proxies["geonode"] = geonode_result[:int(self.max_proxies/2)]
+             except Exception as e:
+                 print(f"[bold red]Error fetching from geonode:[/bold red] {str(e)[:100]}")
+                 sources_proxies["geonode"] = []
+
+             merged_proxies = []
+
+             if "proxyscrape" in sources_proxies:
+                 merged_proxies.extend(sources_proxies["proxyscrape"])
+
+             if "geonode" in sources_proxies:
+                 merged_proxies.extend(sources_proxies["geonode"])
+
+             proxy_list = merged_proxies[:self.max_proxies]
+             return proxy_list
+
+     def _test_single_request(self, proxy_info: tuple) -> tuple:
+         proxy, source = proxy_info
+         try:
+             start = time.time()
+             with httpx.Client(proxy=proxy, timeout=self.timeout_threshold) as client:
+                 response = client.get(self.url, headers=get_headers())
+                 if response.status_code == 200:
+                     return (True, time.time() - start, response, source)
+         except Exception:
+             pass
+         return (False, self.timeout_threshold + 1, None, source)
+
+     def test_proxy(self, proxy_info: tuple) -> tuple:
+         proxy, source = proxy_info
+         if self.shutdown_flag:
+             return (proxy, False, 0, None, source)
+
+         success1, time1, text1, source = self._test_single_request(proxy_info)
+         if not success1 or time1 > self.timeout_threshold:
+             return (proxy, False, time1, None, source)
+
+         success2, time2, _, source = self._test_single_request(proxy_info)
+         avg_time = (time1 + time2) / 2
+         return (proxy, success2 and time2 <= self.timeout_threshold, avg_time, text1, source)
+
+     def _handle_interrupt(self, sig, frame):
+         print("\n[bold yellow]Received keyboard interrupt. Terminating...[/bold yellow]")
+         self.shutdown_flag = True
+         sys.exit(0)
+
+     def find_fast_proxy(self) -> tuple:
+         saved_proxies, last_update = self.load_saved_proxies()
+         if saved_proxies:
+             print("[cyan]Testing saved proxy...[/cyan]")
+             for proxy_data in saved_proxies:
+                 result = self.test_proxy((proxy_data['proxy'], 'cached'))
+                 if result[1]:
+                     return proxy_data['proxy'], result[3], result[2]
+                 else:
+                     print(f"[red]Saved proxy {proxy_data['proxy']} failed - response time: {result[2]:.2f}s[/red]")
+
+         proxies = self.fetch_proxies_from_sources()
+         if not proxies:
+             print("[bold red]No proxies fetched to test.[/bold red]")
+             return (None, None, None)
+
+         found_proxy = None
+         response_text = None
+         source = None
+         failed_count = 0
+         success_count = 0
+
+         #print(f"[cyan]Testing {len(proxies)} proxies...[/cyan]")
+         with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
+             futures = {executor.submit(self.test_proxy, p): p for p in proxies}
+             with Progress(
+                 SpinnerColumn(),
+                 TextColumn("[progress.description]{task.description}"),
+                 BarColumn(),
+                 TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+                 TextColumn("[cyan]{task.fields[success]}[/cyan]/[red]{task.fields[failed]}[/red]"),
+                 TimeRemainingColumn(),
+             ) as progress:
+                 task = progress.add_task(
+                     "[cyan]Testing Proxies",
+                     total=len(futures),
+                     success=success_count,
+                     failed=failed_count
+                 )
+
+                 for future in as_completed(futures):
+                     if self.shutdown_flag:
+                         break
+
+                     try:
+                         proxy, success, elapsed, response, proxy_source = future.result()
+                         if success:
+                             success_count += 1
+                             print(f"[bold green]Found valid proxy:[/bold green] {proxy} ({elapsed:.2f}s)")
+                             found_proxy = proxy
+                             response_text = response
+                             self.save_working_proxy(proxy, elapsed)
+                             self.shutdown_flag = True
+                             break
+                         else:
+                             failed_count += 1
+                     except Exception:
+                         failed_count += 1
+
+                     progress.update(task, advance=1, success=success_count, failed=failed_count)
+
+         if not found_proxy:
+             print("[bold red]No working proxies found[/bold red]")
+
+         return (found_proxy, response_text, source)
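
A short usage sketch for ProxyFinder (the import path is taken from the file list above; the target URL is a placeholder). find_fast_proxy returns a three-tuple whose first element is the working proxy string, or None if nothing passed the probe:

    from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder

    finder = ProxyFinder("https://example.invalid/")     # site the proxy must be able to reach
    proxy, probe_response, _ = finder.find_fast_proxy()
    if proxy:
        print(f"Working proxy: {proxy}")                 # e.g. "http://1.2.3.4:8080", reusable as httpx(..., proxy=proxy)
    else:
        print("No working proxy found")
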
StreamingCommunity/Upload/update.py CHANGED
@@ -3,7 +3,7 @@
  import os
  import sys
  import time
-
+ import asyncio
 
  # External library
  import httpx
@@ -24,32 +24,33 @@ else:
  base_path = os.path.dirname(__file__)
  console = Console()
 
+ async def fetch_github_data(client, url):
+     """Helper function to fetch data from GitHub API"""
+     response = await client.get(
+         url=url,
+         headers={'user-agent': get_userAgent()},
+         timeout=config_manager.get_int("REQUESTS", "timeout"),
+         follow_redirects=True
+     )
+     return response.json()
+
+ async def async_github_requests():
+     """Make concurrent GitHub API requests"""
+     async with httpx.AsyncClient() as client:
+         tasks = [
+             fetch_github_data(client, f"https://api.github.com/repos/{__author__}/{__title__}"),
+             fetch_github_data(client, f"https://api.github.com/repos/{__author__}/{__title__}/releases"),
+             fetch_github_data(client, f"https://api.github.com/repos/{__author__}/{__title__}/commits")
+         ]
+         return await asyncio.gather(*tasks)
 
  def update():
      """
      Check for updates on GitHub and display relevant information.
      """
      try:
-         response_reposity = httpx.get(
-             url=f"https://api.github.com/repos/{__author__}/{__title__}",
-             headers={'user-agent': get_userAgent()},
-             timeout=config_manager.get_int("REQUESTS", "timeout"),
-             follow_redirects=True
-         ).json()
-
-         response_releases = httpx.get(
-             url=f"https://api.github.com/repos/{__author__}/{__title__}/releases",
-             headers={'user-agent': get_userAgent()},
-             timeout=config_manager.get_int("REQUESTS", "timeout"),
-             follow_redirects=True
-         ).json()
-
-         response_commits = httpx.get(
-             url=f"https://api.github.com/repos/{__author__}/{__title__}/commits",
-             headers={'user-agent': get_userAgent()},
-             timeout=config_manager.get_int("REQUESTS", "timeout"),
-             follow_redirects=True
-         ).json()
+         # Run async requests concurrently
+         response_reposity, response_releases, response_commits = asyncio.run(async_github_requests())
 
      except Exception as e:
          console.print(f"[red]Error accessing GitHub API: {e}")
@@ -92,4 +93,4 @@ def update():
      console.print(f"\n[red]{__title__} has been downloaded [yellow]{total_download_count} [red]times, but only [yellow]{percentual_stars}% [red]of users have starred it.\n\
  [cyan]Help the repository grow today by leaving a [yellow]star [cyan]and [yellow]sharing [cyan]it with others online!")
 
-     time.sleep(3)
+     time.sleep(4)
StreamingCommunity/Upload/version.py CHANGED
@@ -1,5 +1,5 @@
  __title__ = 'StreamingCommunity'
- __version__ = '3.0.1'
+ __version__ = '3.0.3'
  __author__ = 'Arrowar'
  __description__ = 'A command-line program to download film'
  __copyright__ = 'Copyright 2024'
StreamingCommunity/Util/os.py CHANGED
@@ -19,6 +19,7 @@ from unidecode import unidecode
  from rich.console import Console
  from rich.prompt import Prompt
  from pathvalidate import sanitize_filename, sanitize_filepath
+ from dns.resolver import dns
 
 
  # Internal utilities
@@ -282,6 +283,43 @@ class InternManager():
          else:
              return f"{bytes / (1024 * 1024):.2f} MB/s"
 
+     def check_dns_provider(self):
+         """
+         Check if the system's current DNS server matches any known DNS providers.
+
+         Returns:
+             bool: True if the current DNS server matches a known provider,
+                   False if no match is found or in case of errors
+         """
+         dns_providers = {
+             "Cloudflare": ["1.1.1.1", "1.0.0.1"],
+             "Google": ["8.8.8.8", "8.8.4.4"],
+             "OpenDNS": ["208.67.222.222", "208.67.220.220"],
+             "Quad9": ["9.9.9.9", "149.112.112.112"],
+             "AdGuard": ["94.140.14.14", "94.140.15.15"],
+             "Comodo": ["8.26.56.26", "8.20.247.20"],
+             "Level3": ["209.244.0.3", "209.244.0.4"],
+             "Norton": ["199.85.126.10", "199.85.127.10"],
+             "CleanBrowsing": ["185.228.168.9", "185.228.169.9"],
+             "Yandex": ["77.88.8.8", "77.88.8.1"]
+         }
+
+         try:
+             resolver = dns.resolver.Resolver()
+             nameservers = resolver.nameservers
+
+             if not nameservers:
+                 return False
+
+             for server in nameservers:
+                 for provider, ips in dns_providers.items():
+                     if server in ips:
+                         return True
+             return False
+
+         except Exception:
+             return False
+
 
  class OsSummary:
      def __init__(self):
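
The new check relies on dnspython's stub resolver reading the operating system's resolver configuration (e.g. /etc/resolv.conf on Unix-like systems). A standalone sketch of the same idea, assuming dnspython is installed; the provider set below is a subset of the table above:

    import dns.resolver

    KNOWN_PUBLIC_DNS = {"1.1.1.1", "1.0.0.1", "8.8.8.8", "8.8.4.4", "9.9.9.9", "149.112.112.112"}

    resolver = dns.resolver.Resolver()        # reads the system's configured nameservers
    print("Nameservers:", resolver.nameservers)
    print("Known public DNS in use:", any(ip in KNOWN_PUBLIC_DNS for ip in resolver.nameservers))
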
StreamingCommunity/global_search.py CHANGED
@@ -58,7 +58,10 @@ def load_search_functions():
              # Get 'indice' from the module
              indice = getattr(mod, 'indice', 0)
              use_for = getattr(mod, '_useFor', 'other')
-             modules.append((module_name, indice, use_for))
+             priority = getattr(mod, '_priority', 0)
+
+             if priority == 0:
+                 modules.append((module_name, indice, use_for))
 
          except Exception as e:
              console.print(f"[red]Failed to import module {module_name}: {str(e)}")
@@ -296,17 +299,26 @@ def process_selected_item(selected_item, search_functions):
      console.print(f"\n[bold green]Processing selection from:[/bold green] {selected_item.get('source')}")
 
      # Extract necessary information to pass to the site's search function
-     item_id = selected_item.get('id', selected_item.get('media_id'))
+     item_id = None
+     for id_field in ['id', 'media_id', 'ID', 'item_id', 'url']:
+         item_id = selected_item.get(id_field)
+         if item_id:
+             break
+
      item_type = selected_item.get('type', selected_item.get('media_type', 'unknown'))
      item_title = selected_item.get('title', selected_item.get('name', 'Unknown'))
 
      if item_id:
          console.print(f"[bold green]Selected item:[/bold green] {item_title} (ID: {item_id}, Type: {item_type})")
 
-         # Call the site's search function with direct_item parameter to process download
          try:
              func(direct_item=selected_item)
+
          except Exception as e:
              console.print(f"[bold red]Error processing download:[/bold red] {str(e)}")
+             logging.exception("Download processing error")
+
      else:
-         console.print("[bold red]Error: Item ID not found.[/bold red]")
+         console.print("[bold red]Error: Item ID not found. Available fields:[/bold red]")
+         for key in selected_item.keys():
+             console.print(f"[yellow]- {key}: {selected_item[key]}[/yellow]")
StreamingCommunity/run.py CHANGED
@@ -21,7 +21,7 @@ from rich.prompt import Prompt
  from .global_search import global_search
  from StreamingCommunity.Util.message import start_message
  from StreamingCommunity.Util.config_json import config_manager
- from StreamingCommunity.Util.os import os_summary
+ from StreamingCommunity.Util.os import os_summary, internet_manager
  from StreamingCommunity.Util.logger import Logger
  from StreamingCommunity.Upload.update import update as git_update
  from StreamingCommunity.Lib.TMBD import tmdb
@@ -200,6 +200,15 @@ def main(script_id = 0):
      # Create logger
      log_not = Logger()
      initialize()
+
+     if not internet_manager.check_dns_provider():
+         console.print("[red]❌ ERROR: DNS configuration is required!")
+         console.print("[red]The program cannot function correctly without proper DNS settings.")
+         console.print("[yellow]Please configure one of these DNS servers:")
+         console.print("[blue]• Cloudflare (1.1.1.1)")
+         console.print("[blue]• Quad9 (9.9.9.9)")
+         console.print("\n[yellow]⚠️ The program will not work until you configure your DNS settings.")
+         input("[yellow]Press Enter to exit...")
 
      # Load search functions
      search_functions = load_search_functions()
@@ -381,4 +390,4 @@ def main(script_id = 0):
      # Delete script_id
      script_id = TelegramSession.get_session()
      if script_id != "unknown":
-         TelegramSession.deleteScriptId(script_id)
+         TelegramSession.deleteScriptId(script_id)