StreamingCommunity 3.0.0__py3-none-any.whl → 3.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic. Click here for more details.

Files changed (29)
  1. StreamingCommunity/Api/Player/hdplayer.py +65 -0
  2. StreamingCommunity/Api/Player/mixdrop.py +145 -0
  3. StreamingCommunity/Api/Site/1337xx/site.py +5 -2
  4. StreamingCommunity/Api/Site/altadefinizione/site.py +1 -1
  5. StreamingCommunity/Api/Site/animeunity/site.py +2 -1
  6. StreamingCommunity/Api/Site/animeunity/util/ScrapeSerie.py +22 -12
  7. StreamingCommunity/Api/Site/animeworld/site.py +1 -1
  8. StreamingCommunity/Api/Site/ddlstreamitaly/site.py +1 -1
  9. StreamingCommunity/Api/Site/guardaserie/site.py +1 -1
  10. StreamingCommunity/Api/Site/raiplay/site.py +2 -2
  11. StreamingCommunity/Api/Site/streamingcommunity/series.py +2 -2
  12. StreamingCommunity/Api/Site/streamingcommunity/site.py +1 -1
  13. StreamingCommunity/Api/Site/streamingwatch/__init__.py +95 -0
  14. StreamingCommunity/Api/Site/{cb01new → streamingwatch}/film.py +12 -13
  15. StreamingCommunity/Api/Site/streamingwatch/series.py +160 -0
  16. StreamingCommunity/Api/Site/streamingwatch/site.py +111 -0
  17. StreamingCommunity/Api/Site/streamingwatch/util/ScrapeSerie.py +118 -0
  18. StreamingCommunity/Lib/Proxies/proxy.py +232 -0
  19. StreamingCommunity/Upload/version.py +1 -1
  20. StreamingCommunity/Util/config_json.py +11 -13
  21. {streamingcommunity-3.0.0.dist-info → streamingcommunity-3.0.2.dist-info}/METADATA +16 -2
  22. {streamingcommunity-3.0.0.dist-info → streamingcommunity-3.0.2.dist-info}/RECORD +26 -22
  23. {streamingcommunity-3.0.0.dist-info → streamingcommunity-3.0.2.dist-info}/WHEEL +1 -1
  24. StreamingCommunity/Api/Player/maxstream.py +0 -140
  25. StreamingCommunity/Api/Site/cb01new/__init__.py +0 -71
  26. StreamingCommunity/Api/Site/cb01new/site.py +0 -81
  27. {streamingcommunity-3.0.0.dist-info → streamingcommunity-3.0.2.dist-info}/entry_points.txt +0 -0
  28. {streamingcommunity-3.0.0.dist-info → streamingcommunity-3.0.2.dist-info}/licenses/LICENSE +0 -0
  29. {streamingcommunity-3.0.0.dist-info → streamingcommunity-3.0.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,65 @@
1
+ # 29.04.25
2
+
3
+ import re
4
+
5
+
6
+ # External library
7
+ import httpx
8
+ from bs4 import BeautifulSoup
9
+
10
+
11
+ # Internal utilities
12
+ from StreamingCommunity.Util.headers import get_headers
13
+ from StreamingCommunity.Util.config_json import config_manager
14
+
15
+
16
+ # Variable
17
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
18
+
19
+
20
class VideoSource:
    """Resolve the final m3u8 stream URL from a page embedding an hdPlayer iframe.

    NOTE: the underlying HTTP client is closed after the first call to
    get_m3u8_url(), so each instance is single-use.
    """

    def __init__(self, proxy=None):
        """
        Parameters:
            proxy (str, optional): proxy URL forwarded to httpx.Client.
        """
        self.client = httpx.Client(headers=get_headers(), timeout=MAX_TIMEOUT, proxy=proxy)

    def extractLinkHdPlayer(self, response):
        """Return the first iframe 'data-lazy-src' value found in the page, or None."""
        soup = BeautifulSoup(response.content, 'html.parser')

        # Fix: scan every iframe instead of only iframes[0] — the player frame
        # is not guaranteed to be the first iframe on the page, and only it
        # carries the lazy-loaded source attribute.
        for iframe in soup.find_all("iframe"):
            src = iframe.get('data-lazy-src')
            if src:
                return src
        return None

    def get_m3u8_url(self, page_url):
        """
        Extract the m3u8 URL from an hdPlayer page.

        Parameters:
            page_url (str): URL of the page embedding the player iframe.

        Returns:
            str | None: the m3u8 URL, or None on any failure.
        """
        try:
            # Get the page content; fix: the status of this first request was
            # previously never checked before parsing.
            response = self.client.get(page_url)
            if response.status_code != 200:
                return None

            # Extract HDPlayer iframe URL
            iframe_url = self.extractLinkHdPlayer(response)
            if not iframe_url:
                return None

            # Get HDPlayer page content
            response_hdplayer = self.client.get(iframe_url)
            if response_hdplayer.status_code != 200:
                return None

            soup = BeautifulSoup(response_hdplayer.text, 'html.parser')

            # The player inlines its config in a script tag shaped like:
            #   sources: [{ file: "https://.../playlist.m3u8" ...
            for script in soup.find_all("script"):
                match = re.search(r'sources:\s*\[\{\s*file:\s*"([^"]+)"', script.text)
                if match:
                    return match.group(1)

            return None

        except Exception as e:
            # Best-effort contract: callers expect None on failure, not a raise.
            print(f"Error in HDPlayer: {str(e)}")
            return None

        finally:
            # Single-use instance: release the connection pool after the lookup.
            self.client.close()
@@ -0,0 +1,145 @@
1
+ # 05.07.24
2
+
3
+ import re
4
+ import logging
5
+
6
+
7
+ # External libraries
8
+ import httpx
9
+ import jsbeautifier
10
+ from bs4 import BeautifulSoup
11
+
12
+
13
+ # Internal utilities
14
+ from StreamingCommunity.Util.config_json import config_manager
15
+ from StreamingCommunity.Util.headers import get_userAgent
16
+
17
+
18
+ # Variable
19
+ MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
20
+
21
+
22
class VideoSource:
    """Resolve a MixDrop delivery URL starting from a page that links to stayonline.pro.

    Flow: page -> stayonline redirect -> linkId -> final URL -> MixDrop embed -> delivery URL.
    """

    STAYONLINE_BASE_URL = "https://stayonline.pro"
    MIXDROP_BASE_URL = "https://mixdrop.sb"

    def __init__(self, url: str):
        """
        Parameters:
            url (str): page URL expected to contain a stayonline link.
        """
        self.url = url
        self.redirect_url: str | None = None
        self._init_headers()

    def _init_headers(self) -> None:
        """Initialize the base headers used for stayonline requests."""
        self.headers = {
            'origin': self.STAYONLINE_BASE_URL,
            'user-agent': get_userAgent(),
        }

    def _get_mixdrop_headers(self) -> dict:
        """Get headers specifically for MixDrop requests."""
        return {
            'referer': 'https://mixdrop.club/',
            'user-agent': get_userAgent()
        }

    def get_redirect_url(self) -> str:
        """Extract the stayonline redirect URL from the initial page.

        Returns:
            str: the stayonline link found on the page.

        Raises:
            ValueError: if no stayonline link is present.
            httpx.HTTPStatusError: on a non-2xx response.
        """
        try:
            response = httpx.get(self.url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            for link in soup.find_all('a'):
                href = link.get('href')
                if href and 'stayonline' in href:
                    self.redirect_url = href
                    logging.info(f"Redirect URL: {self.redirect_url}")
                    return self.redirect_url

            raise ValueError("Stayonline URL not found")

        except Exception as e:
            logging.error(f"Error getting redirect URL: {e}")
            raise

    def get_link_id(self) -> str:
        """Extract the link ID from the redirect page.

        Raises:
            ValueError: if get_redirect_url() was not called first, or no
                linkId assignment is found in any script.
        """
        if not self.redirect_url:
            raise ValueError("Redirect URL not set. Call get_redirect_url first.")

        try:
            response = httpx.get(self.redirect_url, headers=self.headers, follow_redirects=True, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            soup = BeautifulSoup(response.text, "html.parser")

            # The page defines: var linkId = "<id>"
            for script in soup.find_all('script'):
                match = re.search(r'var\s+linkId\s*=\s*"([^"]+)"', script.text)
                if match:
                    return match.group(1)

            raise ValueError("LinkId not found")

        except Exception as e:
            logging.error(f"Error getting link ID: {e}")
            raise

    def get_final_url(self, link_id: str) -> str:
        """Get the final URL by POSTing the link ID to the stayonline AJAX endpoint."""
        try:
            # The endpoint validates the referer against the /l/<id>/ page.
            self.headers['referer'] = f'{self.STAYONLINE_BASE_URL}/l/{link_id}/'
            data = {'id': link_id, 'ref': ''}

            response = httpx.post(f'{self.STAYONLINE_BASE_URL}/ajax/linkView.php', headers=self.headers, data=data, timeout=MAX_TIMEOUT)
            response.raise_for_status()
            return response.json()['data']['value']

        except Exception as e:
            logging.error(f"Error getting final URL: {e}")
            raise

    def _extract_video_id(self, final_url: str) -> str:
        """Extract the video ID (5th '/'-separated segment) from the final URL.

        Raises:
            ValueError: if the URL has fewer than 5 segments.
        """
        parts = final_url.split('/')
        if len(parts) < 5:
            raise ValueError("Invalid final URL format")
        return parts[4]

    def _extract_delivery_url(self, script_text: str) -> str:
        """Extract the MDCore.wurl delivery URL from beautified JavaScript.

        Raises:
            ValueError: if no MDCore.wurl assignment is found.
        """
        beautified = jsbeautifier.beautify(script_text)
        for line in beautified.splitlines():
            if 'MDCore.wurl' in line:
                # Line looks like: MDCore.wurl = "//delivery...";
                # Fix: strip the trailing semicolon BEFORE the quotes — the old
                # strip('"').strip(';') order left a trailing '"' in the result
                # (silently papered over by a replace('"', '') in get_playlist).
                url = line.split('=', 1)[1].strip().rstrip(';').strip('"')
                # wurl is protocol-relative (//host/...): prepend the scheme.
                return f"https:{url}"
        raise ValueError("Delivery URL not found in script")

    def get_playlist(self) -> str:
        """
        Execute the entire flow to obtain the final video URL.

        Returns:
            str: The final video delivery URL

        Raises:
            ValueError: if any intermediate page lacks the expected data.
        """
        self.get_redirect_url()
        link_id = self.get_link_id()

        final_url = self.get_final_url(link_id)
        video_id = self._extract_video_id(final_url)

        response = httpx.get(
            f'{self.MIXDROP_BASE_URL}/e/{video_id}',
            headers=self._get_mixdrop_headers(),
            timeout=MAX_TIMEOUT
        )
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        # The packed player script is the one containing an eval(...) call.
        script_text = next(
            (script.text for script in soup.find_all('script')
             if "eval" in str(script.text)),
            None
        )

        if not script_text:
            raise ValueError("Required script not found")

        # Defensive: _extract_delivery_url now returns a clean URL, so the
        # replace is a no-op kept for safety against odd player markup.
        return self._extract_delivery_url(script_text).replace('"', '')
@@ -52,13 +52,13 @@ def title_search(query: str) -> int:
52
52
  response.raise_for_status()
53
53
 
54
54
  except Exception as e:
55
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
55
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
56
56
  return 0
57
57
 
58
58
  # Create soup and find table
59
59
  soup = BeautifulSoup(response.text, "html.parser")
60
60
 
61
- for tr in soup.find_all('tr'):
61
+ for i, tr in enumerate(soup.find_all('tr')):
62
62
  try:
63
63
 
64
64
  title_info = {
@@ -72,6 +72,9 @@ def title_search(query: str) -> int:
72
72
  }
73
73
  media_search_manager.add_media(title_info)
74
74
 
75
+ if i == 20:
76
+ break
77
+
75
78
  except Exception as e:
76
79
  print(f"Error parsing a film entry: {e}")
77
80
 
@@ -55,7 +55,7 @@ def title_search(query: str) -> int:
55
55
  response.raise_for_status()
56
56
 
57
57
  except Exception as e:
58
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
58
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
59
59
  if site_constant.TELEGRAM_BOT:
60
60
  bot.send_message(f"ERRORE\n\nErrore nella richiesta di ricerca:\n\n{e}", None)
61
61
  return 0
@@ -119,7 +119,8 @@ def title_search(query: str) -> int:
119
119
  process_results(response1.json()['records'], seen_titles, media_search_manager, choices)
120
120
 
121
121
  except Exception as e:
122
- console.print(f"Site: {site_constant.SITE_NAME}, livesearch error: {e}")
122
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
123
+ return 0
123
124
 
124
125
  # Second API call - archivio
125
126
  try:
@@ -68,22 +68,32 @@ class ScrapeSerieAnime:
68
68
  Fetch all episodes data at once and cache it
69
69
  """
70
70
  try:
71
+ all_episodes = []
71
72
  count = self.get_count_episodes()
72
73
  if not count:
73
74
  return
74
-
75
- response = httpx.get(
76
- url=f"{self.url}/info_api/{self.media_id}/1",
77
- params={
78
- "start_range": 1,
79
- "end_range": count
80
- },
81
- headers=self.headers,
82
- timeout=max_timeout
83
- )
84
- response.raise_for_status()
85
75
 
86
- self.episodes_cache = response.json()["episodes"]
76
+ # Fetch episodes
77
+ start_range = 1
78
+ while start_range <= count:
79
+ end_range = min(start_range + 119, count)
80
+
81
+ response = httpx.get(
82
+ url=f"{self.url}/info_api/{self.media_id}/1",
83
+ params={
84
+ "start_range": start_range,
85
+ "end_range": end_range
86
+ },
87
+ headers=self.headers,
88
+ timeout=max_timeout
89
+ )
90
+ response.raise_for_status()
91
+
92
+ chunk_episodes = response.json().get("episodes", [])
93
+ all_episodes.extend(chunk_episodes)
94
+ start_range = end_range + 1
95
+
96
+ self.episodes_cache = all_episodes
87
97
  except Exception as e:
88
98
  logging.error(f"Error fetching all episodes: {e}")
89
99
  self.episodes_cache = None
@@ -78,7 +78,7 @@ def title_search(query: str) -> int:
78
78
  )
79
79
 
80
80
  except Exception as e:
81
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
81
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
82
82
  return 0
83
83
 
84
84
  # Create soup istance
@@ -53,7 +53,7 @@ def title_search(query: str) -> int:
53
53
  response.raise_for_status()
54
54
 
55
55
  except Exception as e:
56
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
56
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
57
57
  return 0
58
58
 
59
59
  # Create soup and find table
@@ -54,7 +54,7 @@ def title_search(query: str) -> int:
54
54
  response.raise_for_status()
55
55
 
56
56
  except Exception as e:
57
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
57
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
58
58
  return 0
59
59
 
60
60
  # Create soup and find table
@@ -26,7 +26,7 @@ console = Console()
26
26
  media_search_manager = MediaManager()
27
27
  table_show_manager = TVShowManager()
28
28
  max_timeout = config_manager.get_int("REQUESTS", "timeout")
29
- MAX_THREADS = 4
29
+ MAX_THREADS = 12
30
30
 
31
31
 
32
32
  def determine_media_type(title):
@@ -134,7 +134,7 @@ def title_search(query: str) -> int:
134
134
  response.raise_for_status()
135
135
 
136
136
  except Exception as e:
137
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
137
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
138
138
  return 0
139
139
 
140
140
  # Limit to only 15 results for performance
@@ -155,7 +155,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
155
155
 
156
156
  # Init class
157
157
  video_source = VideoSource(site_constant.FULL_URL, True, select_season.id)
158
- scrape_serie = GetSerieInfo(site_constant.FULL_URL, select_season.id, select_season.name)
158
+ scrape_serie = GetSerieInfo(site_constant.FULL_URL, select_season.id, select_season.slug)
159
159
 
160
160
  # Collect information about season
161
161
  scrape_serie.getNumberSeason()
@@ -219,4 +219,4 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
219
219
  # Get script_id
220
220
  script_id = TelegramSession.get_session()
221
221
  if script_id != "unknown":
222
- TelegramSession.deleteScriptId(script_id)
222
+ TelegramSession.deleteScriptId(script_id)
@@ -55,7 +55,7 @@ def title_search(query: str) -> int:
55
55
  response.raise_for_status()
56
56
 
57
57
  except Exception as e:
58
- console.print(f"Site: {site_constant.SITE_NAME}, request search error: {e}")
58
+ console.print(f"[red]Site: {site_constant.SITE_NAME}, request search error: {e}")
59
59
  if site_constant.TELEGRAM_BOT:
60
60
  bot.send_message(f"ERRORE\n\nErrore nella richiesta di ricerca:\n\n{e}", None)
61
61
  return 0
@@ -0,0 +1,95 @@
1
+ # 29.04.25
2
+
3
+ # External library
4
+ from rich.console import Console
5
+ from rich.prompt import Prompt
6
+
7
+
8
+ # Internal utilities
9
+ from StreamingCommunity.Api.Template import get_select_title
10
+ from StreamingCommunity.Lib.Proxies.proxy import ProxyFinder
11
+ from StreamingCommunity.Api.Template.config_loader import site_constant
12
+ from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
13
+
14
+
15
+ # Logic class
16
+ from .site import title_search, table_show_manager, media_search_manager
17
+ from .film import download_film
18
+ from .series import download_series
19
+
20
+
21
+ # Variable
22
+ indice = 8
23
+ _useFor = "film_serie"
24
+ _priority = 10 # !!! MOLTO LENTO
25
+ _engineDownload = "hls"
26
+
27
+ msg = Prompt()
28
+ console = Console()
29
+
30
+
31
def get_user_input(string_to_search: str = None):
    """
    Return a search term, prompting the user only when none was supplied.

    Parameters:
        string_to_search (str, optional): pre-supplied term (e.g. from a
            Telegram bot or CLI argument). When given, no prompt is shown.

    Returns:
        str: the search term, stripped of surrounding whitespace.
    """
    # Bug fix: the previous version always prompted, silently discarding any
    # term the caller passed in — contradicting its own docstring.
    if string_to_search is not None:
        return string_to_search.strip()
    return msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()
38
+
39
def process_search_result(select_title, selections=None, proxy=None):
    """
    Dispatch the selected media item to the film or series downloader.

    Parameters:
        select_title (MediaItem): The selected media item
        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
            {'season': season_selection, 'episode': episode_selection}
        proxy (optional): proxy forwarded to the downloader.
    """
    # Anything that is not a TV show is handled as a film.
    if select_title.type != 'tv':
        download_film(select_title, proxy)
        return

    season_choice = None
    episode_choice = None
    if selections:
        season_choice = selections.get('season')
        episode_choice = selections.get('episode')

    download_series(select_title, season_choice, episode_choice, proxy)
60
+
61
def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
    """
    Main function of the application for search.

    Parameters:
        string_to_search (str, optional): String to search for; prompted
            interactively when None.
        get_onlyDatabase (bool, optional): If True, return only the database object
        direct_item (dict, optional): Direct item to process (bypass search)
        selections (dict, optional): Dictionary containing selection inputs that bypass manual input
            {'season': season_selection, 'episode': episode_selection}

    Returns:
        The media_search_manager when get_onlyDatabase is True, otherwise None.
    """
    # A direct item skips search entirely and goes straight to download.
    if direct_item:
        select_title = MediaItem(**direct_item)
        process_search_result(select_title, selections) # DONT SUPPORT PROXY FOR NOW
        return

    if string_to_search is None:
        string_to_search = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constant.SITE_NAME}").strip()

    # Probe the site through a working proxy before searching; the probe URL is a
    # known series page. NOTE(review): the probe response is reused by
    # title_search — presumably to avoid a second request; confirm in site.py.
    finder = ProxyFinder(url=f"{site_constant.FULL_URL}/serie/euphoria/")
    proxy, response_serie, _ = finder.find_fast_proxy()
    len_database = title_search(string_to_search, [proxy, response_serie])

    # If only the database is needed, return the manager
    if get_onlyDatabase:
        return media_search_manager

    if len_database > 0:
        select_title = get_select_title(table_show_manager, media_search_manager)
        process_search_result(select_title, selections, proxy)

    else:
        # If no results are found, ask again (recursive retry with a fresh prompt;
        # note the retry drops the original keyword arguments).
        console.print(f"\n[red]Nothing matching was found for[white]: [purple]{string_to_search}")
        search()
@@ -1,4 +1,4 @@
1
- # 03.07.24
1
+ # 29.04.25
2
2
 
3
3
  import os
4
4
 
@@ -19,19 +19,20 @@ from StreamingCommunity.Api.Template.Class.SearchType import MediaItem
19
19
 
20
20
 
21
21
  # Player
22
- from StreamingCommunity.Api.Player.maxstream import VideoSource
22
+ from StreamingCommunity.Api.Player.hdplayer import VideoSource
23
23
 
24
24
 
25
25
  # Variable
26
26
  console = Console()
27
27
 
28
28
 
29
- def download_film(select_title: MediaItem) -> str:
29
+ def download_film(select_title: MediaItem, proxy) -> str:
30
30
  """
31
- Downloads a film using the provided obj.
31
+ Downloads a film using the provided film ID, title name, and domain.
32
32
 
33
33
  Parameters:
34
- - select_title (MediaItem): The media item to be downloaded. This should be an instance of the MediaItem class, containing attributes like `name` and `url`.
34
+ - domain (str): The domain of the site
35
+ - version (str): Version of site.
35
36
 
36
37
  Return:
37
38
  - str: output path
@@ -39,19 +40,17 @@ def download_film(select_title: MediaItem) -> str:
39
40
  start_message()
40
41
  console.print(f"[bold yellow]Download:[/bold yellow] [red]{site_constant.SITE_NAME}[/red] → [cyan]{select_title.name}[/cyan] \n")
41
42
 
42
- # Setup api manger
43
- video_source = VideoSource(select_title.url)
43
+ # Get master playlists
44
+ video_source = VideoSource(proxy)
45
+ master_playlist = video_source.get_m3u8_url(select_title.url)
44
46
 
45
- # Define output path
46
- title_name = os_manager.get_sanitize_file(select_title.name) +".mp4"
47
+ # Define the filename and path for the downloaded film
48
+ title_name = os_manager.get_sanitize_file(select_title.name) + ".mp4"
47
49
  mp4_path = os.path.join(site_constant.MOVIE_FOLDER, title_name.replace(".mp4", ""))
48
50
 
49
- # Get m3u8 master playlist
50
- master_playlist = video_source.get_playlist()
51
-
52
51
  # Download the film using the m3u8 playlist, and output filename
53
52
  r_proc = HLS_Downloader(
54
- m3u8_url=master_playlist,
53
+ m3u8_url=master_playlist,
55
54
  output_path=os.path.join(mp4_path, title_name)
56
55
  ).start()
57
56