StreamingCommunity-2.6.1-py3-none-any.whl → StreamingCommunity-2.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic.

Files changed (76)
  1. StreamingCommunity/Api/Player/ddl.py +4 -4
  2. StreamingCommunity/Api/Player/maxstream.py +10 -16
  3. StreamingCommunity/Api/Player/supervideo.py +9 -35
  4. StreamingCommunity/Api/Player/vixcloud.py +18 -92
  5. StreamingCommunity/Api/Site/1337xx/__init__.py +8 -1
  6. StreamingCommunity/Api/Site/1337xx/site.py +16 -15
  7. StreamingCommunity/Api/Site/1337xx/title.py +7 -5
  8. StreamingCommunity/Api/Site/animeunity/__init__.py +9 -2
  9. StreamingCommunity/Api/Site/animeunity/film_serie.py +12 -5
  10. StreamingCommunity/Api/Site/animeunity/site.py +14 -10
  11. StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +9 -10
  12. StreamingCommunity/Api/Site/cb01new/__init__.py +8 -1
  13. StreamingCommunity/Api/Site/cb01new/film.py +7 -1
  14. StreamingCommunity/Api/Site/cb01new/site.py +24 -15
  15. StreamingCommunity/Api/Site/ddlstreamitaly/__init__.py +9 -2
  16. StreamingCommunity/Api/Site/ddlstreamitaly/series.py +7 -1
  17. StreamingCommunity/Api/Site/ddlstreamitaly/site.py +16 -15
  18. StreamingCommunity/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +3 -3
  19. StreamingCommunity/Api/Site/guardaserie/__init__.py +9 -2
  20. StreamingCommunity/Api/Site/guardaserie/series.py +9 -1
  21. StreamingCommunity/Api/Site/guardaserie/site.py +23 -22
  22. StreamingCommunity/Api/Site/guardaserie/util/ScrapeSerie.py +5 -4
  23. StreamingCommunity/Api/Site/mostraguarda/__init__.py +6 -2
  24. StreamingCommunity/Api/Site/mostraguarda/film.py +10 -6
  25. StreamingCommunity/Api/Site/streamingcommunity/__init__.py +9 -2
  26. StreamingCommunity/Api/Site/streamingcommunity/film.py +9 -2
  27. StreamingCommunity/Api/Site/streamingcommunity/series.py +15 -6
  28. StreamingCommunity/Api/Site/streamingcommunity/site.py +16 -14
  29. StreamingCommunity/Api/Site/streamingcommunity/util/ScrapeSerie.py +10 -11
  30. StreamingCommunity/Api/Template/Util/__init__.py +0 -1
  31. StreamingCommunity/Api/Template/Util/get_domain.py +31 -134
  32. StreamingCommunity/Api/Template/Util/manage_ep.py +10 -5
  33. StreamingCommunity/Api/Template/config_loader.py +14 -10
  34. StreamingCommunity/Api/Template/site.py +3 -6
  35. StreamingCommunity/Lib/Downloader/HLS/downloader.py +12 -15
  36. StreamingCommunity/Lib/Downloader/HLS/segments.py +14 -34
  37. StreamingCommunity/Lib/Downloader/MP4/downloader.py +14 -11
  38. StreamingCommunity/Lib/Downloader/TOR/downloader.py +109 -101
  39. StreamingCommunity/Lib/FFmpeg/__init__.py +1 -1
  40. StreamingCommunity/Lib/FFmpeg/capture.py +10 -12
  41. StreamingCommunity/Lib/FFmpeg/command.py +15 -14
  42. StreamingCommunity/Lib/FFmpeg/util.py +9 -38
  43. StreamingCommunity/Lib/M3U8/decryptor.py +72 -146
  44. StreamingCommunity/Lib/M3U8/estimator.py +8 -16
  45. StreamingCommunity/Lib/M3U8/parser.py +1 -17
  46. StreamingCommunity/Lib/M3U8/url_fixer.py +1 -4
  47. StreamingCommunity/Lib/TMBD/__init__.py +2 -0
  48. StreamingCommunity/Lib/TMBD/obj_tmbd.py +3 -17
  49. StreamingCommunity/Lib/TMBD/tmdb.py +4 -9
  50. StreamingCommunity/TelegramHelp/telegram_bot.py +50 -50
  51. StreamingCommunity/Upload/update.py +6 -5
  52. StreamingCommunity/Upload/version.py +1 -1
  53. StreamingCommunity/Util/color.py +1 -1
  54. StreamingCommunity/Util/config_json.py +435 -0
  55. StreamingCommunity/Util/headers.py +7 -36
  56. StreamingCommunity/Util/logger.py +72 -42
  57. StreamingCommunity/Util/message.py +8 -3
  58. StreamingCommunity/Util/os.py +41 -93
  59. StreamingCommunity/Util/table.py +8 -17
  60. StreamingCommunity/run.py +39 -43
  61. {StreamingCommunity-2.6.1.dist-info → StreamingCommunity-2.8.0.dist-info}/METADATA +203 -114
  62. StreamingCommunity-2.8.0.dist-info/RECORD +75 -0
  63. StreamingCommunity/Api/Site/ilcorsaronero/__init__.py +0 -53
  64. StreamingCommunity/Api/Site/ilcorsaronero/site.py +0 -64
  65. StreamingCommunity/Api/Site/ilcorsaronero/title.py +0 -42
  66. StreamingCommunity/Api/Site/ilcorsaronero/util/ilCorsarScraper.py +0 -149
  67. StreamingCommunity/Api/Template/Util/recall_search.py +0 -37
  68. StreamingCommunity/Lib/Downloader/HLS/proxyes.py +0 -110
  69. StreamingCommunity/Util/_jsonConfig.py +0 -241
  70. StreamingCommunity/Util/call_stack.py +0 -42
  71. StreamingCommunity/Util/console.py +0 -12
  72. StreamingCommunity-2.6.1.dist-info/RECORD +0 -83
  73. {StreamingCommunity-2.6.1.dist-info → StreamingCommunity-2.8.0.dist-info}/LICENSE +0 -0
  74. {StreamingCommunity-2.6.1.dist-info → StreamingCommunity-2.8.0.dist-info}/WHEEL +0 -0
  75. {StreamingCommunity-2.6.1.dist-info → StreamingCommunity-2.8.0.dist-info}/entry_points.txt +0 -0
  76. {StreamingCommunity-2.6.1.dist-info → StreamingCommunity-2.8.0.dist-info}/top_level.txt +0 -0
StreamingCommunity/Api/Player/ddl.py

@@ -9,8 +9,8 @@ from bs4 import BeautifulSoup
 
 
  # Internal utilities
- from StreamingCommunity.Util._jsonConfig import config_manager
- from StreamingCommunity.Util.headers import get_headers
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Util.headers import get_userAgent
 
 
  # Variable
@@ -22,7 +22,7 @@ class VideoSource:
  """
  Initializes the VideoSource object with default values.
  """
- self.headers = {'user-agent': get_headers()}
+ self.headers = {'user-agent': get_userAgent()}
  self.cookie = cookie
 
  def setup(self, url: str) -> None:
@@ -85,4 +85,4 @@ class VideoSource:
  logging.error("Failed to retrieve content from the URL.")
 
  except Exception as e:
- logging.error(f"An error occurred while parsing the playlist: {e}")
+ logging.error(f"An error occurred while parsing the playlist: {e}")
StreamingCommunity/Api/Player/maxstream.py

@@ -11,12 +11,12 @@ from bs4 import BeautifulSoup
 
 
  # Internal utilities
- from StreamingCommunity.Util._jsonConfig import config_manager
- from StreamingCommunity.Util.headers import get_headers
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Util.headers import get_userAgent
 
 
  # Variable
- max_timeout = config_manager.get_int("REQUESTS", "timeout")
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
 
 
  class VideoSource:
@@ -31,16 +31,14 @@ class VideoSource:
  self.redirect_url = None
  self.maxstream_url = None
  self.m3u8_url = None
- self.headers = {'user-agent': get_headers()}
+ self.headers = {'user-agent': get_userAgent()}
 
  def get_redirect_url(self):
  """
  Sends a request to the initial URL and extracts the redirect URL.
  """
  try:
-
- # Send a GET request to the initial URL
- response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
+ response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
  response.raise_for_status()
 
  # Extract the redirect URL from the HTML
@@ -63,9 +61,7 @@ class VideoSource:
  Sends a request to the redirect URL and extracts the Maxstream URL.
  """
  try:
-
- # Send a GET request to the redirect URL
- response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
+ response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
  response.raise_for_status()
 
  # Extract the Maxstream URL from the HTML
@@ -78,7 +74,7 @@ class VideoSource:
  logging.warning("Anchor tag not found. Trying the alternative method.")
  headers = {
  'origin': 'https://stayonline.pro',
- 'user-agent': get_headers(),
+ 'user-agent': get_userAgent(),
  'x-requested-with': 'XMLHttpRequest',
  }
 
@@ -89,7 +85,7 @@ class VideoSource:
  uprot_url = response.json()['data']['value']
 
  # Retry getting maxtstream url
- response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
+ response = httpx.get(uprot_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
  response.raise_for_status()
  soup = BeautifulSoup(response.text, "html.parser")
  maxstream_url = soup.find("a").get("href")
@@ -115,9 +111,7 @@ class VideoSource:
  Sends a request to the Maxstream URL and extracts the .m3u8 file URL.
  """
  try:
-
- # Send a GET request to the Maxstream URL
- response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=max_timeout)
+ response = httpx.get(self.maxstream_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
  response.raise_for_status()
  soup = BeautifulSoup(response.text, "html.parser")
 
@@ -148,4 +142,4 @@ class VideoSource:
  """
  self.get_redirect_url()
  self.get_maxstream_url()
- return self.get_m3u8_url()
+ return self.get_m3u8_url()
StreamingCommunity/Api/Player/supervideo.py

@@ -11,12 +11,12 @@ from bs4 import BeautifulSoup
 
 
  # Internal utilities
- from StreamingCommunity.Util._jsonConfig import config_manager
- from StreamingCommunity.Util.headers import get_headers
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Util.headers import get_userAgent
 
 
  # Variable
- max_timeout = config_manager.get_int("REQUESTS", "timeout")
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
 
 
  class VideoSource:
@@ -30,7 +30,7 @@ class VideoSource:
  self.headers = {
  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
  'accept-language': 'it-IT,it;q=0.9,en-US;q=0.8,en;q=0.7',
- 'User-Agent': get_headers()
+ 'User-Agent': get_userAgent()
  }
  self.client = httpx.Client()
  self.url = url
@@ -45,40 +45,15 @@ class VideoSource:
  Returns:
  - str: The response content if successful, None otherwise.
  """
-
  try:
- response = self.client.get(
- url=url,
- headers=self.headers,
- follow_redirects=True,
- timeout=max_timeout
- )
+ response = self.client.get(url, headers=self.headers, timeout=MAX_TIMEOUT, follow_redirects=True)
  response.raise_for_status()
  return response.text
 
  except Exception as e:
  logging.error(f"Request failed: {e}")
  return None
-
- def parse_html(self, html_content: str) -> BeautifulSoup:
- """
- Parse the provided HTML content using BeautifulSoup.
-
- Parameters:
- - html_content (str): The HTML content to parse.
-
- Returns:
- - BeautifulSoup: Parsed HTML content if successful, None otherwise.
- """
-
- try:
- soup = BeautifulSoup(html_content, "html.parser")
- return soup
-
- except Exception as e:
- logging.error(f"Failed to parse HTML content: {e}")
- return None
-
+
  def get_iframe(self, soup):
  """
  Extracts the source URL of the second iframe in the provided BeautifulSoup object.
@@ -107,7 +82,7 @@ class VideoSource:
  """
  content = self.make_request(url)
  if content:
- return self.parse_html(content)
+ return BeautifulSoup(content, "html.parser")
 
  return None
 
@@ -140,7 +115,7 @@ class VideoSource:
  logging.error("Failed to fetch HTML content.")
  return None
 
- soup = self.parse_html(html_content)
+ soup = BeautifulSoup(html_content, "html.parser")
  if not soup:
  logging.error("Failed to parse HTML content.")
  return None
@@ -190,5 +165,4 @@ class VideoSource:
 
  except Exception as e:
  logging.error(f"An error occurred: {e}")
- return None
-
+ return None
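supervideo.py also drops its parse_html() wrapper and builds the BeautifulSoup object inline wherever markup is needed. A small illustrative sketch of that simplification, using a made-up HTML snippet rather than a real page:

from bs4 import BeautifulSoup

html = "<html><body><iframe src='https://example.org/embed'></iframe></body></html>"  # made-up sample

# 2.6.1 style: soup = video_source.parse_html(html)  (a helper that only wrapped the constructor)
# 2.8.0 style: construct the parser directly at the call site.
soup = BeautifulSoup(html, "html.parser")

iframe = soup.find("iframe")
print(iframe.get("src") if iframe else None)  # -> https://example.org/embed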
StreamingCommunity/Api/Player/vixcloud.py

@@ -8,32 +8,32 @@ from urllib.parse import urlparse, parse_qs, urlencode, urlunparse
  # External libraries
  import httpx
  from bs4 import BeautifulSoup
+ from rich.console import Console
 
 
  # Internal utilities
- from StreamingCommunity.Util.headers import get_headers
- from StreamingCommunity.Util.console import console
- from StreamingCommunity.Util._jsonConfig import config_manager
+ from StreamingCommunity.Util.headers import get_userAgent
+ from StreamingCommunity.Util.config_json import config_manager
  from .Helper.Vixcloud.util import WindowVideo, WindowParameter, StreamsCollection
  from .Helper.Vixcloud.js_parser import JavaScriptParser
 
 
  # Variable
- max_timeout = config_manager.get_int("REQUESTS", "timeout")
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
+ console = Console()
 
 
  class VideoSource:
- def __init__(self, site_name: str, is_series: bool):
+ def __init__(self, url: str, is_series: bool):
  """
  Initialize video source for streaming site.
 
  Args:
- site_name (str): Name of streaming site
- is_series (bool): Flag for series or movie content
+ - url (str): The URL of the streaming site.
+ - is_series (bool): Flag for series or movie content
  """
- self.headers = {'user-agent': get_headers()}
- self.base_name = site_name
- self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
+ self.headers = {'user-agent': get_userAgent()}
+ self.url = url
  self.is_series = is_series
 
  def setup(self, media_id: int):
@@ -61,13 +61,7 @@ class VideoSource:
  }
 
  try:
-
- # Make a request to get iframe source
- response = httpx.get(
- url=f"https://{self.base_name}.{self.domain}/iframe/{self.media_id}",
- params=params,
- timeout=max_timeout
- )
+ response = httpx.get(f"{self.url}/iframe/{self.media_id}", params=params, timeout=MAX_TIMEOUT)
  response.raise_for_status()
 
  # Parse response with BeautifulSoup to get iframe source
@@ -109,19 +103,8 @@ class VideoSource:
  """
  try:
  if self.iframe_src is not None:
-
- # Make a request to get content
- try:
- response = httpx.get(
- url=self.iframe_src,
- headers=self.headers,
- timeout=max_timeout
- )
- response.raise_for_status()
-
- except Exception as e:
- logging.error(f"Failed to get vixcloud contente with error: {e}")
- sys.exit(0)
+ response = httpx.get(self.iframe_src, headers=self.headers, timeout=MAX_TIMEOUT)
+ response.raise_for_status()
 
  # Parse response with BeautifulSoup to get content
  soup = BeautifulSoup(response.text, "html.parser")
@@ -141,7 +124,6 @@ class VideoSource:
  Returns:
  str: Fully constructed playlist URL with authentication parameters
  """
- # Initialize parameters dictionary
  params = {}
 
  # Add 'h' parameter if video quality is 1080p
@@ -168,70 +150,19 @@ class VideoSource:
  # Construct the new URL with updated query parameters
  return urlunparse(parsed_url._replace(query=query_string))
 
- def get_mp4(self, url_to_download: str, scws_id: str) -> list:
- """
- Generate download links for the specified resolutions from StreamingCommunity.
-
- Args:
- url_to_download (str): URL of the video page.
- scws_id (str): SCWS ID of the title.
-
- Returns:
- list: A list of video download URLs.
- """
- headers = {
- 'referer': url_to_download,
- 'user-agent': get_headers(),
- }
-
- # API request to get video details
- video_api_url = f'https://{self.base_name}.{self.domain}/api/video/{scws_id}'
- response = httpx.get(video_api_url, headers=headers)
-
- if response.status_code == 200:
- response_json = response.json()
-
- video_tracks = response_json.get('video_tracks', [])
- track = video_tracks[-1]
- console.print(f"[cyan]Available resolutions: [red]{[str(track['quality']) for track in video_tracks]}")
-
- # Request download link generation for each track
- download_response = httpx.post(
- url=f'https://{self.base_name}.{self.domain}/api/download/generate_link?scws_id={track["video_id"]}&rendition={track["quality"]}',
- headers={
- 'referer': url_to_download,
- 'user-agent': get_headers(),
- 'x-xsrf-token': config_manager.get("SITE", self.base_name)['extra']['x-xsrf-token']
- },
- cookies={
- 'streamingcommunity_session': config_manager.get("SITE", self.base_name)['extra']['streamingcommunity_session']
- }
- )
-
- if download_response.status_code == 200:
- return {'url': download_response.text, 'quality': track["quality"]}
-
- else:
- logging.error(f"Failed to generate link for resolution {track['quality']} (HTTP {download_response.status_code}).")
-
- else:
- logging.error(f"Error fetching video API URL (HTTP {response.status_code}).")
- return []
-
 
  class VideoSourceAnime(VideoSource):
- def __init__(self, site_name: str):
+ def __init__(self, url: str):
  """
  Initialize anime-specific video source.
 
  Args:
- site_name (str): Name of anime streaming site
+ - url (str): The URL of the streaming site.
 
  Extends base VideoSource with anime-specific initialization
  """
- self.headers = {'user-agent': get_headers()}
- self.base_name = site_name
- self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
+ self.headers = {'user-agent': get_userAgent()}
+ self.url = url
  self.src_mp4 = None
 
  def get_embed(self, episode_id: int):
@@ -245,12 +176,7 @@ class VideoSourceAnime(VideoSource):
  str: Parsed script content
  """
  try:
-
- response = httpx.get(
- url=f"https://www.{self.base_name}.{self.domain}/embed-url/{episode_id}",
- headers=self.headers,
- timeout=max_timeout
- )
+ response = httpx.get(f"{self.url}/embed-url/{episode_id}", headers=self.headers, timeout=MAX_TIMEOUT)
  response.raise_for_status()
 
  # Extract and clean embed URL
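The vixcloud VideoSource/VideoSourceAnime constructors now take the resolved site URL instead of a site name whose domain was looked up from the config. A minimal sketch of the new call pattern, assuming the wheel is installed; the base URL and media id below are placeholders (the callers in this diff pass site_constant.FULL_URL):

from StreamingCommunity.Api.Player.vixcloud import VideoSource, VideoSourceAnime

BASE_URL = "https://streamingsite.example"  # placeholder; real callers use site_constant.FULL_URL

# 2.6.1: VideoSource("streamingcommunity", is_series=True) resolved the domain internally.
# 2.8.0: the caller passes the full base URL.
video_source = VideoSource(BASE_URL, is_series=True)
video_source.setup(12345)  # placeholder media id

anime_source = VideoSourceAnime(BASE_URL)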
StreamingCommunity/Api/Site/1337xx/__init__.py

@@ -3,8 +3,12 @@
  from urllib.parse import quote_plus
 
 
+ # External library
+ from rich.console import Console
+ from rich.prompt import Prompt
+
+
  # Internal utilities
- from StreamingCommunity.Util.console import console, msg
  from StreamingCommunity.Api.Template import get_select_title
 
 
@@ -21,6 +25,9 @@ _deprecate = False
  _priority = 2
  _engineDownload = "tor"
 
+ console = Console()
+ msg = Prompt()
+
 
  def search(string_to_search: str = None, get_onylDatabase: bool = False):
  """
StreamingCommunity/Api/Site/1337xx/site.py

@@ -1,14 +1,16 @@
  # 02.07.24
 
+ import sys
+
  # External libraries
  import httpx
  from bs4 import BeautifulSoup
+ from rich.console import Console
 
 
  # Internal utilities
- from StreamingCommunity.Util.console import console
- from StreamingCommunity.Util._jsonConfig import config_manager
- from StreamingCommunity.Util.headers import get_headers
+ from StreamingCommunity.Util.config_json import config_manager
+ from StreamingCommunity.Util.headers import get_userAgent
  from StreamingCommunity.Util.table import TVShowManager
 
 
@@ -19,10 +21,10 @@ from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
 
 
  # Variable
+ console = Console()
  media_search_manager = MediaManager()
  table_show_manager = TVShowManager()
  max_timeout = config_manager.get_int("REQUESTS", "timeout")
- disable_searchDomain = config_manager.get_bool("DEFAULT", "disable_searchDomain")
 
 
  def title_search(word_to_search: str) -> int:
@@ -38,20 +40,19 @@ def title_search(word_to_search: str) -> int:
  media_search_manager.clear()
  table_show_manager.clear()
 
- # Find new domain if prev dont work
- domain_to_use = site_constant.DOMAIN_NOW
+ # Check if domain is working
+ domain_to_use, base_url = search_domain(site_constant.SITE_NAME, site_constant.FULL_URL)
+
+ if domain_to_use is None or base_url is None:
+ console.log("[bold red]Error: Unable to determine valid domain or base URL.[/bold red]")
+ console.print("[yellow]The service might be temporarily unavailable or the domain may have changed.[/yellow]")
+ sys.exit(1)
 
- if not disable_searchDomain:
- domain_to_use, base_url = search_domain(site_constant.SITE_NAME, f"https://{site_constant.SITE_NAME}.{site_constant.DOMAIN_NOW}")
+ search_url = f"{site_constant.FULL_URL}/search/{word_to_search}/1/"
+ console.print(f"[cyan]Search url: [yellow]{search_url}")
 
- # Construct the full site URL and load the search page
  try:
- response = httpx.get(
- url=f"https://{site_constant.SITE_NAME}.{domain_to_use}/search/{word_to_search}/1/",
- headers={'user-agent': get_headers()},
- follow_redirects=True,
- timeout=max_timeout
- )
+ response = httpx.get(search_url, headers={'user-agent': get_userAgent()}, timeout=max_timeout, follow_redirects=True)
  response.raise_for_status()
 
  except Exception as e:
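The disable_searchDomain toggle is gone: every site module now calls search_domain() with site_constant.FULL_URL and aborts when no usable domain comes back. A condensed sketch of that guard as it appears in this and later hunks; the site name and URL are hypothetical, and the search_domain import path is assumed from Api/Template/Util/get_domain.py in the file list above:

import sys

from rich.console import Console
from StreamingCommunity.Api.Template.Util.get_domain import search_domain  # assumed import path

console = Console()

SITE_NAME = "1337xx"                        # hypothetical; real code uses site_constant.SITE_NAME
FULL_URL = "https://example-site.example"   # hypothetical; real code uses site_constant.FULL_URL

domain_to_use, base_url = search_domain(SITE_NAME, FULL_URL)
if domain_to_use is None or base_url is None:
    console.log("[bold red]Error: Unable to determine valid domain or base URL.[/bold red]")
    console.print("[yellow]The service might be temporarily unavailable or the domain may have changed.[/yellow]")
    sys.exit(1)

search_url = f"{FULL_URL}/search/query/1/"
console.print(f"[cyan]Search url: [yellow]{search_url}")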
StreamingCommunity/Api/Site/1337xx/title.py

@@ -6,13 +6,13 @@ import os
  # External libraries
  import httpx
  from bs4 import BeautifulSoup
+ from rich.console import Console
 
 
  # Internal utilities
- from StreamingCommunity.Util.console import console
  from StreamingCommunity.Util.os import os_manager
  from StreamingCommunity.Util.message import start_message
- from StreamingCommunity.Util.headers import get_headers
+ from StreamingCommunity.Util.headers import get_userAgent
  from StreamingCommunity.Lib.Downloader import TOR_downloader
 
 
@@ -21,6 +21,9 @@ from StreamingCommunity.Api.Template.config_loader import site_constant
  from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
 
 
+ # Variable
+ console = Console()
+
 
  def download_title(select_title: MediaItem):
  """
@@ -41,11 +44,10 @@ def download_title(select_title: MediaItem):
  os_manager.create_path(mp4_path)
 
  # Make request to page with magnet
- full_site_name = f"{site_constant.SITE_NAME}.{site_constant.DOMAIN_NOW}"
  response = httpx.get(
- url="https://" + full_site_name + select_title.url,
+ url=f"{site_constant.FULL_URL}{select_title.url}",
  headers={
- 'user-agent': get_headers()
+ 'user-agent': get_userAgent()
  },
  follow_redirects=True
  )
StreamingCommunity/Api/Site/animeunity/__init__.py

@@ -4,8 +4,12 @@ import sys
  import subprocess
 
 
+ # External library
+ from rich.console import Console
+ from rich.prompt import Prompt
+
+
  # Internal utilities
- from StreamingCommunity.Util.console import console, msg
  from StreamingCommunity.Api.Template import get_select_title
  from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
 
@@ -23,6 +27,9 @@ _deprecate = False
  _priority = 2
  _engineDownload = "mp4"
 
+ msg = Prompt()
+ console = Console()
+
 
  def search(string_to_search: str = None, get_onylDatabase: bool = False):
 
@@ -34,7 +41,7 @@ def search(string_to_search: str = None, get_onylDatabase: bool = False):
  # Chiedi la scelta all'utente con il bot Telegram
  string_to_search = bot.ask(
  "key_search",
- f"Inserisci la parola da cercare\noppure 🔙 back per tornare alla scelta: ",
+ f"Inserisci la parola da cercare\noppure back per tornare alla scelta: ",
  None
  )
 
StreamingCommunity/Api/Site/animeunity/film_serie.py

@@ -5,8 +5,12 @@ import logging
  from typing import Tuple
 
 
+ # External library
+ from rich.console import Console
+ from rich.prompt import Prompt
+
+
  # Internal utilities
- from StreamingCommunity.Util.console import console, msg
  from StreamingCommunity.Util.os import os_manager
  from StreamingCommunity.Util.message import start_message
  from StreamingCommunity.Lib.Downloader import MP4_downloader
@@ -25,6 +29,8 @@ from StreamingCommunity.Api.Player.vixcloud import VideoSourceAnime
 
 
  # Variable
+ console = Console()
+ msg = Prompt()
  KILL_HANDLER = bool(False)
 
 
@@ -85,6 +91,7 @@ def download_episode(index_select: int, scrape_serie: ScrapeSerieAnime, video_so
 
 
  else:
  logging.error(f"Skip index: {index_select} cant find info with api.")
+ return None, True
 
 
  def download_series(select_title: MediaItem):
@@ -100,8 +107,8 @@ def download_series(select_title: MediaItem):
  if site_constant.TELEGRAM_BOT:
  bot = get_bot_instance()
 
- scrape_serie = ScrapeSerieAnime(site_constant.SITE_NAME)
- video_source = VideoSourceAnime(site_constant.SITE_NAME)
+ scrape_serie = ScrapeSerieAnime(site_constant.FULL_URL)
+ video_source = VideoSourceAnime(site_constant.FULL_URL)
 
  # Set up video source
  scrape_serie.setup(None, select_title.id, select_title.slug)
@@ -160,8 +167,8 @@ def download_film(select_title: MediaItem):
  """
 
  # Init class
- scrape_serie = ScrapeSerieAnime(site_constant.SITE_NAME)
- video_source = VideoSourceAnime(site_constant.SITE_NAME)
+ scrape_serie = ScrapeSerieAnime(site_constant.FULL_URL)
+ video_source = VideoSourceAnime(site_constant.FULL_URL)
 
  # Set up video source
  scrape_serie.setup(None, select_title.id, select_title.slug)
StreamingCommunity/Api/Site/animeunity/site.py

@@ -1,16 +1,17 @@
  # 10.12.23
 
+ import sys
  import logging
 
 
  # External libraries
  import httpx
  from bs4 import BeautifulSoup
+ from rich.console import Console
 
 
  # Internal utilities
- from StreamingCommunity.Util.console import console
- from StreamingCommunity.Util._jsonConfig import config_manager
+ from StreamingCommunity.Util.config_json import config_manager
  from StreamingCommunity.Util.table import TVShowManager
  from StreamingCommunity.TelegramHelp.telegram_bot import get_bot_instance
 
@@ -22,10 +23,10 @@ from StreamingCommunity.Api.Template.Class.SearchType import MediaManager
 
 
  # Variable
+ console = Console()
  media_search_manager = MediaManager()
  table_show_manager = TVShowManager()
  max_timeout = config_manager.get_int("REQUESTS", "timeout")
- disable_searchDomain = config_manager.get_bool("DEFAULT", "disable_searchDomain")
 
 
  def get_token(site_name: str, domain: str) -> dict:
@@ -42,7 +43,7 @@ def get_token(site_name: str, domain: str) -> dict:
 
  # Send a GET request to the specified URL composed of the site name and domain
  response = httpx.get(
- url=f"https://www.{site_name}.{domain}",
+ url=site_constant.FULL_URL,
  timeout=max_timeout
  )
  response.raise_for_status()
@@ -109,11 +110,13 @@ def title_search(title: str) -> int:
  media_search_manager.clear()
  table_show_manager.clear()
 
- # Get token and session value from configuration
- domain_to_use = site_constant.DOMAIN_NOW
-
- if not disable_searchDomain:
- domain_to_use, base_url = search_domain(site_constant.SITE_NAME, f"https://www.{site_constant.SITE_NAME}.{site_constant.DOMAIN_NOW}")
+ # Check if domain is working
+ domain_to_use, base_url = search_domain(site_constant.SITE_NAME, site_constant.FULL_URL)
+
+ if domain_to_use is None or base_url is None:
+ console.print("[bold red]Error: Unable to determine valid domain or base URL.[/bold red]")
+ console.print("[yellow]The service might be temporarily unavailable or the domain may have changed.[/yellow]")
+ sys.exit(1)
 
  data = get_token(site_constant.SITE_NAME, domain_to_use)
 
@@ -138,7 +141,7 @@ def title_search(title: str) -> int:
  # Send a POST request to the API endpoint for live search
  try:
  response = httpx.post(
- url=f'https://www.{site_constant.SITE_NAME}.{domain_to_use}/livesearch',
+ f'{site_constant.FULL_URL}/livesearch',
  cookies=cookies,
  headers=headers,
  json=json_data,
@@ -169,6 +172,7 @@ def title_search(title: str) -> int:
  })
 
  if site_constant.TELEGRAM_BOT:
+
  # Crea una stringa formattata per ogni scelta con numero
  choice_text = f"{len(choices)} - {dict_title.get('name')} ({dict_title.get('type')}) - Episodi: {dict_title.get('episodes_count')}"
  choices.append(choice_text)