StreamingCommunity-1.7.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of StreamingCommunity might be problematic.

Files changed (97)
  1. StreamingCommunity/Src/Api/Player/Helper/Vixcloud/js_parser.py +140 -0
  2. StreamingCommunity/Src/Api/Player/Helper/Vixcloud/util.py +166 -0
  3. StreamingCommunity/Src/Api/Player/ddl.py +89 -0
  4. StreamingCommunity/Src/Api/Player/maxstream.py +151 -0
  5. StreamingCommunity/Src/Api/Player/supervideo.py +194 -0
  6. StreamingCommunity/Src/Api/Player/vixcloud.py +212 -0
  7. StreamingCommunity/Src/Api/Site/1337xx/__init__.py +50 -0
  8. StreamingCommunity/Src/Api/Site/1337xx/costant.py +15 -0
  9. StreamingCommunity/Src/Api/Site/1337xx/site.py +84 -0
  10. StreamingCommunity/Src/Api/Site/1337xx/title.py +66 -0
  11. StreamingCommunity/Src/Api/Site/altadefinizione/__init__.py +50 -0
  12. StreamingCommunity/Src/Api/Site/altadefinizione/costant.py +15 -0
  13. StreamingCommunity/Src/Api/Site/altadefinizione/film.py +69 -0
  14. StreamingCommunity/Src/Api/Site/altadefinizione/site.py +86 -0
  15. StreamingCommunity/Src/Api/Site/animeunity/__init__.py +50 -0
  16. StreamingCommunity/Src/Api/Site/animeunity/anime.py +126 -0
  17. StreamingCommunity/Src/Api/Site/animeunity/costant.py +15 -0
  18. StreamingCommunity/Src/Api/Site/animeunity/film_serie.py +131 -0
  19. StreamingCommunity/Src/Api/Site/animeunity/site.py +165 -0
  20. StreamingCommunity/Src/Api/Site/animeunity/util/ScrapeSerie.py +97 -0
  21. StreamingCommunity/Src/Api/Site/bitsearch/__init__.py +51 -0
  22. StreamingCommunity/Src/Api/Site/bitsearch/costant.py +15 -0
  23. StreamingCommunity/Src/Api/Site/bitsearch/site.py +84 -0
  24. StreamingCommunity/Src/Api/Site/bitsearch/title.py +47 -0
  25. StreamingCommunity/Src/Api/Site/cb01new/__init__.py +51 -0
  26. StreamingCommunity/Src/Api/Site/cb01new/costant.py +15 -0
  27. StreamingCommunity/Src/Api/Site/cb01new/film.py +69 -0
  28. StreamingCommunity/Src/Api/Site/cb01new/site.py +74 -0
  29. StreamingCommunity/Src/Api/Site/ddlstreamitaly/Player/ScrapeSerie.py +83 -0
  30. StreamingCommunity/Src/Api/Site/ddlstreamitaly/__init__.py +57 -0
  31. StreamingCommunity/Src/Api/Site/ddlstreamitaly/costant.py +16 -0
  32. StreamingCommunity/Src/Api/Site/ddlstreamitaly/series.py +142 -0
  33. StreamingCommunity/Src/Api/Site/ddlstreamitaly/site.py +93 -0
  34. StreamingCommunity/Src/Api/Site/ddlstreamitaly/util/ScrapeSerie.py +83 -0
  35. StreamingCommunity/Src/Api/Site/guardaserie/Player/ScrapeSerie.py +110 -0
  36. StreamingCommunity/Src/Api/Site/guardaserie/__init__.py +52 -0
  37. StreamingCommunity/Src/Api/Site/guardaserie/costant.py +15 -0
  38. StreamingCommunity/Src/Api/Site/guardaserie/series.py +195 -0
  39. StreamingCommunity/Src/Api/Site/guardaserie/site.py +84 -0
  40. StreamingCommunity/Src/Api/Site/guardaserie/util/ScrapeSerie.py +110 -0
  41. StreamingCommunity/Src/Api/Site/mostraguarda/__init__.py +48 -0
  42. StreamingCommunity/Src/Api/Site/mostraguarda/costant.py +15 -0
  43. StreamingCommunity/Src/Api/Site/mostraguarda/film.py +94 -0
  44. StreamingCommunity/Src/Api/Site/piratebays/__init__.py +50 -0
  45. StreamingCommunity/Src/Api/Site/piratebays/costant.py +15 -0
  46. StreamingCommunity/Src/Api/Site/piratebays/site.py +89 -0
  47. StreamingCommunity/Src/Api/Site/piratebays/title.py +45 -0
  48. StreamingCommunity/Src/Api/Site/streamingcommunity/__init__.py +55 -0
  49. StreamingCommunity/Src/Api/Site/streamingcommunity/costant.py +15 -0
  50. StreamingCommunity/Src/Api/Site/streamingcommunity/film.py +70 -0
  51. StreamingCommunity/Src/Api/Site/streamingcommunity/series.py +203 -0
  52. StreamingCommunity/Src/Api/Site/streamingcommunity/site.py +126 -0
  53. StreamingCommunity/Src/Api/Site/streamingcommunity/util/ScrapeSerie.py +113 -0
  54. StreamingCommunity/Src/Api/Template/Class/SearchType.py +101 -0
  55. StreamingCommunity/Src/Api/Template/Util/__init__.py +5 -0
  56. StreamingCommunity/Src/Api/Template/Util/get_domain.py +137 -0
  57. StreamingCommunity/Src/Api/Template/Util/manage_ep.py +153 -0
  58. StreamingCommunity/Src/Api/Template/Util/recall_search.py +37 -0
  59. StreamingCommunity/Src/Api/Template/__init__.py +3 -0
  60. StreamingCommunity/Src/Api/Template/site.py +87 -0
  61. StreamingCommunity/Src/Lib/Downloader/HLS/downloader.py +968 -0
  62. StreamingCommunity/Src/Lib/Downloader/HLS/proxyes.py +110 -0
  63. StreamingCommunity/Src/Lib/Downloader/HLS/segments.py +540 -0
  64. StreamingCommunity/Src/Lib/Downloader/MP4/downloader.py +156 -0
  65. StreamingCommunity/Src/Lib/Downloader/TOR/downloader.py +222 -0
  66. StreamingCommunity/Src/Lib/Downloader/__init__.py +5 -0
  67. StreamingCommunity/Src/Lib/Driver/driver_1.py +76 -0
  68. StreamingCommunity/Src/Lib/FFmpeg/__init__.py +4 -0
  69. StreamingCommunity/Src/Lib/FFmpeg/capture.py +170 -0
  70. StreamingCommunity/Src/Lib/FFmpeg/command.py +292 -0
  71. StreamingCommunity/Src/Lib/FFmpeg/util.py +242 -0
  72. StreamingCommunity/Src/Lib/M3U8/__init__.py +6 -0
  73. StreamingCommunity/Src/Lib/M3U8/decryptor.py +129 -0
  74. StreamingCommunity/Src/Lib/M3U8/estimator.py +173 -0
  75. StreamingCommunity/Src/Lib/M3U8/parser.py +666 -0
  76. StreamingCommunity/Src/Lib/M3U8/url_fixer.py +52 -0
  77. StreamingCommunity/Src/Lib/TMBD/__init__.py +2 -0
  78. StreamingCommunity/Src/Lib/TMBD/obj_tmbd.py +39 -0
  79. StreamingCommunity/Src/Lib/TMBD/tmdb.py +346 -0
  80. StreamingCommunity/Src/Upload/update.py +64 -0
  81. StreamingCommunity/Src/Upload/version.py +5 -0
  82. StreamingCommunity/Src/Util/_jsonConfig.py +204 -0
  83. StreamingCommunity/Src/Util/call_stack.py +42 -0
  84. StreamingCommunity/Src/Util/color.py +20 -0
  85. StreamingCommunity/Src/Util/console.py +12 -0
  86. StreamingCommunity/Src/Util/headers.py +147 -0
  87. StreamingCommunity/Src/Util/logger.py +53 -0
  88. StreamingCommunity/Src/Util/message.py +46 -0
  89. StreamingCommunity/Src/Util/os.py +417 -0
  90. StreamingCommunity/Src/Util/table.py +163 -0
  91. StreamingCommunity/run.py +196 -0
  92. StreamingCommunity-1.7.6.dist-info/LICENSE +674 -0
  93. StreamingCommunity-1.7.6.dist-info/METADATA +348 -0
  94. StreamingCommunity-1.7.6.dist-info/RECORD +97 -0
  95. StreamingCommunity-1.7.6.dist-info/WHEEL +5 -0
  96. StreamingCommunity-1.7.6.dist-info/entry_points.txt +2 -0
  97. StreamingCommunity-1.7.6.dist-info/top_level.txt +1 -0
@@ -0,0 +1,203 @@
+ # 3.12.23
+
+ import os
+ import sys
+ import time
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.console import console, msg
+ from StreamingCommunity.Src.Util.message import start_message
+ from StreamingCommunity.Src.Util.call_stack import get_call_stack
+ from StreamingCommunity.Src.Util.table import TVShowManager
+ from StreamingCommunity.Src.Lib.Downloader import HLS_Downloader
+
+
+ # Logic class
+ from .util.ScrapeSerie import ScrapeSerie
+ from StreamingCommunity.Src.Api.Template.Util import manage_selection, map_episode_title, validate_selection, validate_episode_selection, execute_search
+ from StreamingCommunity.Src.Api.Template.Class.SearchType import MediaItem
+
+
+ # Player
+ from StreamingCommunity.Src.Api.Player.vixcloud import VideoSource
+
+
+ # Variable
+ from .costant import ROOT_PATH, SITE_NAME, SERIES_FOLDER
+ scrape_serie = ScrapeSerie(SITE_NAME)
+ video_source = VideoSource(SITE_NAME, True)
+ table_show_manager = TVShowManager()
+
+
+ def download_video(tv_name: str, index_season_selected: int, index_episode_selected: int) -> None:
+     """
+     Download a single episode video.
+
+     Parameters:
+         - tv_name (str): Name of the TV series.
+         - index_season_selected (int): Index of the selected season.
+         - index_episode_selected (int): Index of the selected episode.
+     """
+
+     start_message()
+
+     # Get info about the episode
+     obj_episode = scrape_serie.obj_episode_manager.episodes[index_episode_selected - 1]
+     console.print(f"[yellow]Download: [red]{index_season_selected}:{index_episode_selected} {obj_episode.name}")
+     print()
+
+     # Define filename and path for the downloaded video
+     mp4_name = f"{map_episode_title(tv_name, index_season_selected, index_episode_selected, obj_episode.name)}.mp4"
+     mp4_path = os.path.join(ROOT_PATH, SITE_NAME, SERIES_FOLDER, tv_name, f"S{index_season_selected}")
+
+     # Retrieve the embed info and, if available, the master playlist
+     video_source.get_iframe(obj_episode.id)
+     video_source.get_content()
+     master_playlist = video_source.get_playlist()
+
+     # Download the episode
+     r_proc = HLS_Downloader(
+         m3u8_playlist=master_playlist,
+         output_filename=os.path.join(mp4_path, mp4_name)
+     ).start()
+
+     if r_proc == 404:
+         time.sleep(2)
+
+         # Re-run the search flow
+         if msg.ask("[green]Do you want to continue [white]([red]y[white])[green] or return home [white]([red]n[white]) ", choices=['y', 'n'], default='y', show_choices=True) == "n":
+             frames = get_call_stack()
+             execute_search(frames[-4])
+
+     if r_proc is not None:
+         console.print("[green]Result: ")
+         console.print(r_proc)
+
+
+ def download_episode(tv_name: str, index_season_selected: int, download_all: bool = False) -> None:
+     """
+     Download episodes of a selected season.
+
+     Parameters:
+         - tv_name (str): Name of the TV series.
+         - index_season_selected (int): Index of the selected season.
+         - download_all (bool): Download all episodes in the season.
+     """
+
+     # Clear previously collected episodes and get the real season number
+     scrape_serie.obj_episode_manager.clear()
+     season_number = scrape_serie.obj_season_manager.seasons[index_season_selected - 1].number
+
+     # Start message and collect information about episodes
+     start_message()
+     scrape_serie.collect_title_season(season_number)
+     episodes_count = scrape_serie.obj_episode_manager.get_length()
+
+     if download_all:
+
+         # Download all episodes without asking
+         for i_episode in range(1, episodes_count + 1):
+             download_video(tv_name, index_season_selected, i_episode)
+         console.print(f"\n[red]Finished downloading [yellow]season: [red]{index_season_selected}.")
+
+     else:
+
+         # Display the episodes list and manage user selection
+         last_command = display_episodes_list()
+         list_episode_select = manage_selection(last_command, episodes_count)
+
+         try:
+             list_episode_select = validate_episode_selection(list_episode_select, episodes_count)
+         except ValueError as e:
+             console.print(f"[red]{str(e)}")
+             return
+
+         # Download the selected episodes
+         for i_episode in list_episode_select:
+             download_video(tv_name, index_season_selected, i_episode)
+
+
+ def download_series(select_season: MediaItem, version: str) -> None:
+     """
+     Download episodes of a TV series based on user selection.
+
+     Parameters:
+         - select_season (MediaItem): Selected media item (TV series).
+         - version (str): Version of the site.
+     """
+
+     # Start message
+     start_message()
+
+     # Set up the scraper and the video source
+     scrape_serie.setup(version, select_season.id, select_season.slug)
+     video_source.setup(select_season.id)
+
+     # Collect information about seasons
+     scrape_serie.collect_info_seasons()
+     seasons_count = scrape_serie.obj_season_manager.get_length()
+
+     # Prompt the user for season selection and download episodes
+     console.print(f"\n[green]Seasons found: [red]{seasons_count}")
+     index_season_selected = msg.ask(
+         "\n[cyan]Insert season number [yellow](e.g., 1), [red]* [cyan]to download all seasons, "
+         "[yellow](e.g., 1-2) [cyan]for a range of seasons, or [yellow](e.g., 3-*) [cyan]to download from a specific season to the end"
+     )
+
+     # Manage and validate the selection
+     list_season_select = manage_selection(index_season_selected, seasons_count)
+
+     try:
+         list_season_select = validate_selection(list_season_select, seasons_count)
+     except ValueError as e:
+         console.print(f"[red]{str(e)}")
+         return
+
+     # Loop through the selected seasons and download episodes
+     for i_season in list_season_select:
+         if len(list_season_select) > 1 or index_season_selected == "*":
+
+             # Download all episodes if multiple seasons are selected or if '*' is used
+             download_episode(select_season.slug, i_season, download_all=True)
+         else:
+
+             # Otherwise, let the user select specific episodes for the single season
+             download_episode(select_season.slug, i_season, download_all=False)
+
+
+ def display_episodes_list() -> str:
+     """
+     Display the episodes list and handle user input.
+
+     Returns:
+         last_command (str): Last command entered by the user.
+     """
+
+     # Set up the table for displaying episodes
+     table_show_manager.set_slice_end(10)
+
+     # Add columns to the table
+     column_info = {
+         "Index": {'color': 'red'},
+         "Name": {'color': 'magenta'},
+         "Duration": {'color': 'green'}
+     }
+     table_show_manager.add_column(column_info)
+
+     # Populate the table with episode information
+     for media in scrape_serie.obj_episode_manager.episodes:
+         table_show_manager.add_tv_show({
+             'Index': str(media.number),
+             'Name': media.name,
+             'Duration': str(media.duration)
+         })
+
+     # Run the table and handle user input
+     last_command = table_show_manager.run()
+
+     if last_command == "q":
+         console.print("\n[red]Quit [white]...")
+         sys.exit(0)
+
+     return last_command
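
For reference, a short sketch of the selection strings the season and episode prompts above accept, based on the wording of the msg.ask prompt in download_series; the actual parsing lives in manage_selection/validate_selection from Template.Util, which is not part of this diff:

# Illustrative only - formats taken from the prompt text above.
examples = [
    "1",      # a single season (or episode) index
    "1-2",    # an inclusive range
    "3-*",    # from index 3 through the last one
    "*",      # everything
]
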
@@ -0,0 +1,126 @@
+ # 10.12.23
+
+ import sys
+ import json
+ import logging
+
+
+ # External libraries
+ import httpx
+ from bs4 import BeautifulSoup
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.console import console
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+ from StreamingCommunity.Src.Util.headers import get_headers
+ from StreamingCommunity.Src.Util.table import TVShowManager
+
+
+ # Logic class
+ from StreamingCommunity.Src.Api.Template import get_select_title
+ from StreamingCommunity.Src.Api.Template.Util import search_domain
+ from StreamingCommunity.Src.Api.Template.Class.SearchType import MediaManager
+
+
+ # Config
+ from .costant import SITE_NAME
+
+
+ # Variable
+ media_search_manager = MediaManager()
+ table_show_manager = TVShowManager()
+
+
+ def get_version(text: str):
+     """
+     Extract the site version from the HTML text of a webpage.
+
+     Parameters:
+         - text (str): The HTML text of the webpage.
+
+     Returns:
+         str: The version extracted from the webpage.
+     """
+     try:
+
+         # Parse the page
+         soup = BeautifulSoup(text, "html.parser")
+
+         # Extract the version from the Inertia page payload
+         version = json.loads(soup.find("div", {"id": "app"}).get("data-page"))['version']
+         #console.print(f"[cyan]Get version [white]=> [red]{version} \n")
+
+         return version
+
+     except Exception as e:
+         logging.error(f"Error extracting version: {e}")
+         raise
+
+
+ def get_version_and_domain():
+     """
+     Retrieve the current version and domain of the site.
+
+     This function performs the following steps:
+         - Determines the correct domain to use for the site (falling back to a web search if the configured one no longer works).
+         - Fetches the site content to extract the version information.
+     """
+
+     # Find a new domain if the previous one no longer works
+     domain_to_use, base_url = search_domain(SITE_NAME, f"https://{SITE_NAME}")
+
+     # Extract the version from the response
+     version = get_version(httpx.get(base_url, headers={'user-agent': get_headers()}).text)
+
+     return version, domain_to_use
+
+
+ def title_search(title_search: str, domain: str) -> int:
+     """
+     Search for titles based on a search query.
+
+     Parameters:
+         - title_search (str): The title to search for.
+         - domain (str): The domain to search on.
+
+     Returns:
+         int: The number of titles found.
+     """
+
+     max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+     # Send the search request (spaces are replaced with "+")
+     try:
+         response = httpx.get(
+             url=f"https://{SITE_NAME}.{domain}/api/search?q={title_search.replace(' ', '+')}",
+             headers={'user-agent': get_headers()},
+             timeout=max_timeout
+         )
+         response.raise_for_status()
+
+     except Exception as e:
+         console.print(f"Site: {SITE_NAME}, request search error: {e}")
+
+         # No usable response: report zero results
+         return 0
+
+     # Add the found titles to the media search manager
+     for dict_title in response.json()['data']:
+         media_search_manager.add_media({
+             'id': dict_title.get('id'),
+             'slug': dict_title.get('slug'),
+             'name': dict_title.get('name'),
+             'type': dict_title.get('type'),
+             'score': dict_title.get('score')
+         })
+
+     # Return the number of titles found
+     return media_search_manager.get_length()
+
+
+ def run_get_select_title():
+     """
+     Display a selection of titles and prompt the user to choose one.
+     """
+     return get_select_title(table_show_manager, media_search_manager)
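
A hedged end-to-end sketch of how these search helpers combine with download_series from the series.py hunk above. Module paths follow the "Files changed" list, the query string is illustrative, and the 'tv' type value is an assumption about the API response:

from StreamingCommunity.Src.Api.Site.streamingcommunity.site import (
    get_version_and_domain, title_search, run_get_select_title
)
from StreamingCommunity.Src.Api.Site.streamingcommunity.series import download_series

version, domain = get_version_and_domain()       # resolve a working domain and the site version
if title_search("example title", domain) > 0:    # populates media_search_manager
    selected = run_get_select_title()            # interactive table selection
    if selected.type == 'tv':                    # 'tv' is an assumed value of the API's type field
        download_series(selected, version)       # prompts for seasons and episodes
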
@@ -0,0 +1,113 @@
+ # 01.03.24
+
+ import logging
+
+
+ # External libraries
+ import httpx
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.headers import get_headers
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+ from StreamingCommunity.Src.Api.Player.Helper.Vixcloud.util import SeasonManager, EpisodeManager
+
+
+ # Variable
+ max_timeout = config_manager.get_int("REQUESTS", "timeout")
+
+
+ class ScrapeSerie:
+     def __init__(self, site_name: str):
+         """
+         Initialize the ScrapeSerie class for scraping TV series information.
+
+         Args:
+             site_name (str): Name of the streaming site to scrape from
+         """
+         self.is_series = False
+         self.headers = {'user-agent': get_headers()}
+         self.base_name = site_name
+         self.domain = config_manager.get_dict('SITE', self.base_name)['domain']
+
+     def setup(self, version: str = None, media_id: int = None, series_name: str = None):
+         """
+         Set up the scraper with specific media details.
+
+         Args:
+             version (str, optional): Site version for request headers
+             media_id (int, optional): Unique identifier for the media
+             series_name (str, optional): Name of the TV series
+         """
+         self.version = version
+         self.media_id = media_id
+
+         # If a series name is provided, initialize the series-specific managers
+         if series_name is not None:
+             self.is_series = True
+             self.series_name = series_name
+             self.obj_season_manager: SeasonManager = SeasonManager()
+             self.obj_episode_manager: EpisodeManager = EpisodeManager()
+
+     def collect_info_seasons(self) -> None:
+         """
+         Retrieve season information for a TV series from the streaming site.
+
+         Raises:
+             Exception: If there's an error fetching season information
+         """
+         self.headers = {
+             'user-agent': get_headers(),
+             'x-inertia': 'true',
+             'x-inertia-version': self.version,
+         }
+
+         try:
+
+             response = httpx.get(
+                 url=f"https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}",
+                 headers=self.headers,
+                 timeout=max_timeout
+             )
+             response.raise_for_status()
+
+             # Extract the seasons from the JSON response
+             json_response = response.json().get('props', {}).get('title', {}).get('seasons', [])
+
+             # Add each season to the season manager
+             for dict_season in json_response:
+                 self.obj_season_manager.add_season(dict_season)
+
+         except Exception as e:
+             logging.error(f"Error collecting season info: {e}")
+             raise
+
+     def collect_title_season(self, number_season: int) -> None:
+         """
+         Retrieve episode information for a specific season.
+
+         Args:
+             number_season (int): Season number to fetch episodes for
+
+         Raises:
+             Exception: If there's an error fetching episode information
+         """
+         try:
+
+             response = httpx.get(
+                 url=f'https://{self.base_name}.{self.domain}/titles/{self.media_id}-{self.series_name}/stagione-{number_season}',
+                 headers=self.headers,
+                 timeout=max_timeout
+             )
+             response.raise_for_status()
+
+             # Extract the episodes from the JSON response
+             json_response = response.json().get('props', {}).get('loadedSeason', {}).get('episodes', [])
+
+             # Add each episode to the episode manager
+             for dict_episode in json_response:
+                 self.obj_episode_manager.add_episode(dict_episode)
+
+         except Exception as e:
+             logging.error(f"Error collecting title season info: {e}")
+             raise
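
A minimal sketch of driving ScrapeSerie directly, mirroring how series.py uses it above; the site name, version string, media id, and slug are placeholder values:

scraper = ScrapeSerie("streamingcommunity")        # must match a key under SITE in config.json
scraper.setup(version="abc123", media_id=1234, series_name="example-show")  # placeholder values
scraper.collect_info_seasons()
print(scraper.obj_season_manager.get_length())     # number of seasons found

scraper.collect_title_season(1)
for ep in scraper.obj_episode_manager.episodes:    # episodes of season 1
    print(ep.number, ep.name, ep.duration)
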
@@ -0,0 +1,101 @@
+ # 07.07.24
+
+ from typing import List, TypedDict
+
+
+ class MediaItemData(TypedDict, total=False):
+     id: int          # GENERAL
+     name: str        # GENERAL
+     type: str        # GENERAL
+     url: str         # GENERAL
+     size: str        # GENERAL
+     score: str       # GENERAL
+     date: str        # GENERAL
+     desc: str        # GENERAL
+
+     seeder: int      # TOR
+     leecher: int     # TOR
+
+     slug: str        # SC
+
+
+ class MediaItemMeta(type):
+     def __new__(cls, name, bases, dct):
+         def init(self, **kwargs):
+             for key, value in kwargs.items():
+                 setattr(self, key, value)
+
+         dct['__init__'] = init
+
+         def get_attr(self, item):
+             return self.__dict__.get(item, None)
+
+         dct['__getattr__'] = get_attr
+
+         def set_attr(self, key, value):
+             self.__dict__[key] = value
+
+         dct['__setattr__'] = set_attr
+
+         return super().__new__(cls, name, bases, dct)
+
+
+ class MediaItem(metaclass=MediaItemMeta):
+     id: int          # GENERAL
+     name: str        # GENERAL
+     type: str        # GENERAL
+     url: str         # GENERAL
+     size: str        # GENERAL
+     score: str       # GENERAL
+     date: str        # GENERAL
+     desc: str        # GENERAL
+
+     seeder: int      # TOR
+     leecher: int     # TOR
+
+     slug: str        # SC
+
+
+ class MediaManager:
+     def __init__(self):
+         self.media_list: List[MediaItem] = []
+
+     def add_media(self, data: dict) -> None:
+         """
+         Add media to the list.
+
+         Args:
+             data (dict): Media data to add.
+         """
+         self.media_list.append(MediaItem(**data))
+
+     def get(self, index: int) -> MediaItem:
+         """
+         Get a media item from the list by index.
+
+         Args:
+             index (int): The index of the media item to retrieve.
+
+         Returns:
+             MediaItem: The media item at the specified index.
+         """
+         return self.media_list[index]
+
+     def get_length(self) -> int:
+         """
+         Get the number of media items in the list.
+
+         Returns:
+             int: Number of media items.
+         """
+         return len(self.media_list)
+
+     def clear(self) -> None:
+         """
+         Clear the media list.
+         """
+         self.media_list.clear()
+
+     def __str__(self):
+         return f"MediaManager(num_media={len(self.media_list)})"
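
A small sketch of the dynamic-attribute behaviour MediaItemMeta gives MediaItem: every keyword passed to add_media becomes an attribute, and attributes that were never set resolve to None instead of raising AttributeError:

manager = MediaManager()
manager.add_media({'id': 1, 'name': 'Example', 'type': 'tv', 'slug': 'example'})

item = manager.get(0)
print(item.name)             # "Example"
print(item.seeder)           # None - never set, __getattr__ falls back to None
print(manager.get_length())  # 1
print(manager)               # MediaManager(num_media=1)
manager.clear()
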
@@ -0,0 +1,5 @@
+ # 23.11.24
+
+ from .recall_search import execute_search
+ from .get_domain import search_domain
+ from .manage_ep import manage_selection, map_episode_title, validate_episode_selection, validate_selection
@@ -0,0 +1,137 @@
+ # 18.06.24
+
+ import sys
+ from urllib.parse import urlparse
+
+
+ # External libraries
+ import httpx
+ from googlesearch import search
+
+
+ # Internal utilities
+ from StreamingCommunity.Src.Util.headers import get_headers
+ from StreamingCommunity.Src.Util.console import console, msg
+ from StreamingCommunity.Src.Util._jsonConfig import config_manager
+
+
+ def google_search(query):
+     """
+     Perform a Google search and return the first result.
+
+     Args:
+         query (str): The search query to execute on Google.
+
+     Returns:
+         str: The first URL result from the search, or None if no result is found.
+     """
+     # Perform the search on Google and limit it to one result
+     search_results = search(query, num_results=1)
+
+     # Extract the first result
+     first_result = next(search_results, None)
+
+     if not first_result:
+         console.print("[red]No results found.[/red]")
+
+     return first_result
+
+
+ def get_final_redirect_url(initial_url, max_timeout):
+     """
+     Follow redirects from the initial URL and return the final URL after all redirects.
+
+     Args:
+         initial_url (str): The URL to start with and follow redirects.
+         max_timeout (int): Request timeout in seconds.
+
+     Returns:
+         str: The final URL after all redirects are followed, or None on error.
+     """
+
+     # Create a client with redirects enabled
+     try:
+         with httpx.Client(follow_redirects=True, timeout=max_timeout, headers={'user-agent': get_headers()}) as client:
+             response = client.get(initial_url)
+             response.raise_for_status()
+
+             # Capture the final URL after all redirects
+             final_url = response.url
+
+             return final_url
+
+     except Exception as e:
+         console.print(f"[cyan]Test url[white]: [red]{initial_url}, [cyan]error[white]: [red]{e}")
+         return None
+
+
+ def search_domain(site_name: str, base_url: str):
+     """
+     Search for a valid domain for the given site name and base URL.
+
+     Parameters:
+         - site_name (str): The name of the site to search the domain for.
+         - base_url (str): The base URL (without TLD) used to build complete URLs.
+
+     Returns:
+         tuple: The found domain (TLD) and the complete URL.
+     """
+
+     # Extract the configured domain
+     max_timeout = config_manager.get_int("REQUESTS", "timeout")
+     domain = str(config_manager.get_dict("SITE", site_name)['domain'])
+
+     try:
+
+         # Test the current domain
+         response_follow = httpx.get(f"{base_url}.{domain}", headers={'user-agent': get_headers()}, timeout=max_timeout, follow_redirects=True)
+         response_follow.raise_for_status()
+
+     except Exception as e:
+
+         query = base_url.split("/")[-1]
+         first_url = google_search(query)
+         console.print(f"[green]First url from Google search[white]: [red]{first_url}")
+
+         if first_url:
+             final_url = get_final_redirect_url(first_url, max_timeout)
+
+             if final_url is not None:
+                 console.print(f"\n[bold yellow]Suggestion:[/bold yellow] [white](Experimental)\n"
+                               f"[cyan]New final URL[white]: [green]{final_url}")
+
+                 def extract_domain(url):
+                     parsed_url = urlparse(url)
+                     domain = parsed_url.netloc
+                     return domain.split(".")[-1]
+
+                 new_domain_extract = extract_domain(str(final_url))
+
+                 if msg.ask(f"[red]Do you want to auto update config.json - '[green]{site_name}[red]' with domain: [green]{new_domain_extract}", choices=["y", "n"], default="y").lower() == "y":
+
+                     # Update the domain in config.json
+                     config_manager.config['SITE'][site_name]['domain'] = new_domain_extract
+                     config_manager.write_config()
+
+                     # Return the updated domain
+                     #console.print(f"[cyan]Return domain: [red]{new_domain_extract} \n")
+                     return new_domain_extract, f"{base_url}.{new_domain_extract}"
+
+                 else:
+                     console.print("[bold red]\nManually change the domain in the JSON file.[/bold red]")
+                     raise
+
+             else:
+                 console.print("[bold red]No valid URL to follow redirects.[/bold red]")
+
+     # Ensure the URL is a string before parsing
+     parsed_url = urlparse(str(response_follow.url))
+     parse_domain = parsed_url.netloc
+     tld = parse_domain.split('.')[-1]
+
+     if tld is not None:
+
+         # Update the domain in config.json
+         config_manager.config['SITE'][site_name]['domain'] = tld
+         config_manager.write_config()
+
+         # Return the config domain
+         return tld, f"{base_url}.{tld}"
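
A hedged usage sketch of search_domain, matching the call made in site.py earlier in this diff; "streamingcommunity" stands in for any key present under the SITE section of config.json:

domain, full_url = search_domain("streamingcommunity", "https://streamingcommunity")
print(domain)     # the TLD currently stored in (or written back to) config.json
print(full_url)   # the reachable base URL, i.e. f"https://streamingcommunity.{domain}"
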