flix-cli 1.6.2__tar.gz → 1.6.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.3
  Name: flix-cli
- Version: 1.6.2
+ Version: 1.6.4
  Summary: A high efficient, powerful and fast movie scraper.
  License: GPLv3
  Author: DemonKingSwarn
@@ -10,10 +10,12 @@ Classifier: License :: Other/Proprietary License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Requires-Dist: beautifulsoup4 (==4.10.0)
  Requires-Dist: colorama (==0.4.5)
- Requires-Dist: httpx (==0.23.0)
- Requires-Dist: krfzf-py (>=0.0.4,<0.0.5)
+ Requires-Dist: httpx (==0.28.1)
+ Requires-Dist: krfzf-py (==0.0.4)
  Requires-Dist: pycryptodomex (==3.14.1)
  Description-Content-Type: text/plain
 
@@ -0,0 +1,490 @@
+ #!/usr/bin/env python3
+
+ import httpx
+ import regex as re
+ from fzf import fzf_prompt
+ import subprocess
+ import platform
+ import os
+
+ from .utils.__player__ import play
+ from .utils.__downloader__ import download
+ from .__version__ import __core__
+
+ try:
+     import orjson as json
+ except ImportError:
+     import json
+
+ import sys
+ from urllib.parse import urljoin, quote
+ import time
+ from bs4 import BeautifulSoup
+
+ headers = {
+     "User-Agent": f"flix-cli/{__core__}",
+     "Referer": "https://flixhq.to/",
+     "X-Requested-With": "XMLHttpRequest"
+ }
+
+ client = httpx.Client(headers=headers, follow_redirects=True, timeout=None)
+
+ FLIXHQ_BASE_URL = "https://flixhq.to"
+ FLIXHQ_SEARCH_URL = f"{FLIXHQ_BASE_URL}/search"
+ FLIXHQ_AJAX_URL = f"{FLIXHQ_BASE_URL}/ajax"
+ DECODER = "https://dec.eatmynerds.live"
+
+ selected_media = None
+ selected_subtitles = []
+
+ def decode_url(url: str):
+     """Decode the stream URL using the decoding service"""
+     try:
+         print(f"Debug: Decoding URL: {url}")
+         decoder_endpoint = f"{DECODER}?url={quote(url)}"
+
+         resp = client.get(decoder_endpoint, headers={
+             "User-Agent": headers["User-Agent"],
+             "Referer": FLIXHQ_BASE_URL
+         })
+
+         if resp.status_code == 200:
+             try:
+                 data = resp.json()
+                 if 'sources' in data and data['sources']:
+                     video_link = data['sources'][0].get('file', '')
+                     if video_link and '.m3u8' in video_link:
+                         print(f"Debug: Found m3u8 URL: {video_link}")
+                         subtitles = []
+                         if 'tracks' in data:
+                             for track in data['tracks']:
+                                 if track.get('kind') == 'captions' and track.get('file'):
+                                     subtitles.append(track['file'])
+                         return video_link, subtitles
+
+                 # Try other common fields
+                 for key in ['link', 'url', 'file']:
+                     if key in data and data[key]:
+                         return data[key], []
+
+             except json.JSONDecodeError:
+                 text_response = resp.text
+                 m3u8_match = re.search(r'"file":"([^"]*\.m3u8[^"]*)"', text_response)
+                 if m3u8_match:
+                     decoded_url = m3u8_match.group(1)
+                     print(f"Debug: Regex extracted m3u8: {decoded_url}")
+                     return decoded_url, []
+
+         print(f"Debug: Failed to decode, using original URL")
+         return url, []
+
+     except Exception as e:
+         print(f"Debug: Error decoding URL: {e}")
+         return url, []
+
+ def search_content(query: str):
+     """Search for content on flixhq.to"""
+     try:
+         search_params = query.replace(" ", "-")
+         response = client.get(f"{FLIXHQ_SEARCH_URL}/{search_params}")
+         response.raise_for_status()
+
+         soup = BeautifulSoup(response.text, 'html.parser')
+         items = soup.find_all('div', class_='flw-item')
+
+         if not items:
+             print("No results found")
+             return None
+
+         results = []
+         urls = []
+
+         for i, item in enumerate(items[:10]):
+             poster_link = item.find('div', class_='film-poster')
+             detail_section = item.find('div', class_='film-detail')
+
+             if poster_link and detail_section:
+                 link_elem = poster_link.find('a')
+                 title_elem = detail_section.find('h2', class_='film-name')
+
+                 if link_elem and title_elem:
+                     href = link_elem.get('href', '')
+                     title_link = title_elem.find('a')
+                     title = title_link.get('title', 'Unknown Title') if title_link else 'Unknown Title'
+
+                     info_elem = detail_section.find('div', class_='fd-infor')
+                     year = ""
+                     content_type = ""
+
+                     if info_elem:
+                         spans = info_elem.find_all('span')
+                         if spans:
+                             year = spans[0].text.strip() if spans else ""
+                             if len(spans) > 1:
+                                 content_type = spans[1].text.strip()
+
+                     display_title = f"{i+1}. {title}"
+                     if year:
+                         display_title += f" ({year})"
+                     if content_type:
+                         display_title += f" [{content_type}]"
+
+                     results.append(display_title)
+                     urls.append(urljoin(FLIXHQ_BASE_URL, href))
+
+         if not results:
+             print("No valid results found")
+             return None
+
+         selected = fzf_prompt(results)
+         if not selected:
+             return None
+
+         selected_index = int(selected[0]) - 1
+         return urls[selected_index]
+
+     except Exception as e:
+         print(f"Search failed: {e}")
+         return None
+
+ def get_tv_seasons(media_id: str):
+     """Get TV show seasons using lobster's approach"""
+     try:
+         seasons_url = f"{FLIXHQ_AJAX_URL}/v2/tv/seasons/{media_id}"
+
+         response = client.get(seasons_url)
+         print(f"Debug: Seasons URL: {seasons_url}")
+         print(f"Debug: Seasons response status: {response.status_code}")
+
+         if response.status_code == 200:
+             # Parse like lobster: extract season title and ID from href
+             season_pattern = re.compile(r'href="[^"]*-(\d+)"[^>]*>([^<]*)</a>')
+             matches = season_pattern.findall(response.text)
+
+             seasons = []
+             for season_id, season_title in matches:
+                 seasons.append({
+                     'id': season_id,
+                     'title': season_title.strip()
+                 })
+                 print(f"Debug: Found season: {season_title.strip()} (ID: {season_id})")
+
+             return seasons
+
+         return []
+
+     except Exception as e:
+         print(f"Failed to get TV seasons: {e}")
+         return []
+
+ def get_season_episodes(season_id: str):
+     """Get episodes for a season using lobster's approach"""
+     try:
+         episodes_url = f"{FLIXHQ_AJAX_URL}/v2/season/episodes/{season_id}"
+
+         response = client.get(episodes_url)
+         print(f"Debug: Episodes URL: {episodes_url}")
+         print(f"Debug: Episodes response status: {response.status_code}")
+
+         if response.status_code == 200:
+             # Parse like lobster: look for data-id and title in nav-item elements
+             # First, split by class="nav-item" like lobster does
+             content = response.text.replace('\n', '').replace('class="nav-item"', '\nclass="nav-item"')
+
+             episode_pattern = re.compile(r'data-id="(\d+)"[^>]*title="([^"]*)"')
+             matches = episode_pattern.findall(content)
+
+             episodes = []
+             for data_id, episode_title in matches:
+                 episodes.append({
+                     'data_id': data_id,
+                     'title': episode_title.strip()
+                 })
+                 print(f"Debug: Found episode: {episode_title.strip()} (data-id: {data_id})")
+
+             return episodes
+
+         return []
+
+     except Exception as e:
+         print(f"Failed to get season episodes: {e}")
+         return []
+
+ def get_episode_servers(data_id: str, preferred_provider: str = "Vidcloud"):
+     """Get episode servers using lobster's approach"""
+     try:
+         servers_url = f"{FLIXHQ_AJAX_URL}/v2/episode/servers/{data_id}"
+
+         response = client.get(servers_url)
+         print(f"Debug: Servers URL: {servers_url}")
+         print(f"Debug: Servers response status: {response.status_code}")
+
+         if response.status_code == 200:
+             # Parse like lobster: look for data-id and title in nav-item elements
+             content = response.text.replace('\n', '').replace('class="nav-item"', '\nclass="nav-item"')
+
+             server_pattern = re.compile(r'data-id="(\d+)"[^>]*title="([^"]*)"')
+             matches = server_pattern.findall(content)
+
+             servers = []
+             for server_id, server_name in matches:
+                 servers.append({
+                     'id': server_id,
+                     'name': server_name.strip()
+                 })
+                 print(f"Debug: Found server: {server_name.strip()} (ID: {server_id})")
+
+             # Find preferred provider like lobster does
+             for server in servers:
+                 if preferred_provider.lower() in server['name'].lower():
+                     print(f"Debug: Selected {preferred_provider} server: {server['id']}")
+                     return server['id']
+
+             # Fallback to first server
+             if servers:
+                 print(f"Debug: Using fallback server: {servers[0]['id']}")
+                 return servers[0]['id']
+
+         return None
+
+     except Exception as e:
+         print(f"Failed to get episode servers: {e}")
+         return None
+
+ def get_embed_link(episode_id: str):
+     """Get embed link from episode sources endpoint"""
+     try:
+         sources_url = f"{FLIXHQ_AJAX_URL}/episode/sources/{episode_id}"
+
+         response = client.get(sources_url)
+         print(f"Debug: Sources URL: {sources_url}")
+         print(f"Debug: Sources response status: {response.status_code}")
+
+         if response.status_code == 200:
+             # Extract like lobster: look for "link" in JSON response
+             link_match = re.search(r'"link":"([^"]*)"', response.text)
+             if link_match:
+                 embed_link = link_match.group(1)
+                 print(f"Debug: Found embed link: {embed_link}")
+                 return embed_link
+
+         return None
+
+     except Exception as e:
+         print(f"Failed to get embed link: {e}")
+         return None
+
+ def movie():
+     """Handle movie streaming"""
+     global selected_media, selected_subtitles
+
+     # Extract media ID from URL
+     media_id_match = re.search(r'/movie/[^/]*-(\d+)', get_id.selected_url)
+     if not media_id_match:
+         raise RuntimeError("Could not extract media ID from URL")
+
+     media_id = media_id_match.group(1)
+     print(f"Debug: Movie media ID: {media_id}")
+
+     # For movies, use the movie/episodes endpoint like lobster
+     try:
+         movie_episodes_url = f"{FLIXHQ_AJAX_URL}/movie/episodes/{media_id}"
+         response = client.get(movie_episodes_url)
+
+         if response.status_code == 200:
+             # Extract like lobster: find href with provider name
+             content = response.text.replace('\n', '').replace('class="nav-item"', '\nclass="nav-item"')
+
+             # Look for Vidcloud provider first
+             provider_pattern = re.compile(r'href="([^"]*)"[^>]*title="Vidcloud"')
+             match = provider_pattern.search(content)
+
+             if match:
+                 movie_page_url = FLIXHQ_BASE_URL + match.group(1)
+                 # Extract episode ID like lobster: -(\d+).(\d+)$ -> take the second number
+                 episode_match = re.search(r'-(\d+)\.(\d+)$', movie_page_url)
+                 if episode_match:
+                     episode_id = episode_match.group(2)
+                     print(f"Debug: Movie episode ID: {episode_id}")
+
+                     # Get embed link
+                     embed_link = get_embed_link(episode_id)
+                     if embed_link:
+                         selected_media = {
+                             'file': embed_link,
+                             'label': 'Movie Stream',
+                             'type': 'embed'
+                         }
+                         selected_subtitles = []
+                         return
+
+     except Exception as e:
+         print(f"Movie processing failed: {e}")
+
+     raise RuntimeError("Could not get movie stream")
+
+ def series():
+     """Handle series streaming using lobster's exact approach"""
+     global selected_media, selected_subtitles
+
+     season = input("Enter season: ")
+     episode = input("Enter episode: ")
+
+     try:
+         season_num = int(season)
+         episode_num = int(episode)
+     except ValueError:
+         print("Invalid season or episode number")
+         raise RuntimeError("Invalid season or episode number")
+
+     # Extract media ID from URL
+     media_id_match = re.search(r'/tv/[^/]*-(\d+)', get_id.selected_url)
+     if not media_id_match:
+         raise RuntimeError("Could not extract media ID from URL")
+
+     media_id = media_id_match.group(1)
+     print(f"Debug: TV media ID: {media_id}")
+
+     # Step 1: Get seasons
+     seasons = get_tv_seasons(media_id)
+     if not seasons:
+         raise RuntimeError("Could not get seasons")
+
+     # Step 2: Find the target season (try exact match first, then positional)
+     target_season_id = None
+     for season_data in seasons:
+         season_title = season_data['title'].lower()
+         if f"season {season_num}" in season_title or f"s{season_num}" in season_title:
+             target_season_id = season_data['id']
+             break
+
+     # Fallback: assume seasons are in order
+     if not target_season_id and season_num <= len(seasons):
+         target_season_id = seasons[season_num - 1]['id']
+
+     if not target_season_id:
+         raise RuntimeError(f"Could not find season {season_num}")
+
+     print(f"Debug: Target season ID: {target_season_id}")
+
+     # Step 3: Get episodes for this season
+     episodes = get_season_episodes(target_season_id)
+     if not episodes:
+         raise RuntimeError(f"Could not get episodes for season {season_num}")
+
+     # Step 4: Find the target episode (assume episodes are in order)
+     if episode_num > len(episodes):
+         raise RuntimeError(f"Episode {episode_num} not found (only {len(episodes)} episodes available)")
+
+     target_episode = episodes[episode_num - 1]  # Episodes are 1-indexed
+     print(f"Debug: Target episode: {target_episode['title']} (data-id: {target_episode['data_id']})")
+
+     # Step 5: Get episode servers and select Vidcloud
+     episode_id = get_episode_servers(target_episode['data_id'], "Vidcloud")
+     if not episode_id:
+         raise RuntimeError("Could not get episode server ID")
+
+     # Step 6: Get embed link
+     embed_link = get_embed_link(episode_id)
+     if not embed_link:
+         raise RuntimeError("Could not get embed link")
+
+     selected_media = {
+         'file': embed_link,
+         'label': f'S{season_num}E{episode_num} Stream',
+         'type': 'embed'
+     }
+     selected_subtitles = []
+
+ def get_id(query: str):
+     """Search and select content"""
+     selected_url = search_content(query)
+     if not selected_url:
+         print("No content selected")
+         exit(0)
+
+     get_id.selected_url = selected_url
+
+     # Determine content type from URL
+     if '/movie/' in selected_url:
+         get_id.content_type = 'movie'
+     elif '/tv/' in selected_url:
+         get_id.content_type = 'series'
+     else:
+         get_id.content_type = 'unknown'
+
+     return selected_url
+
+ def poison():
+     """Choose content type"""
+     if hasattr(get_id, 'content_type') and get_id.content_type in ['movie', 'series']:
+         if get_id.content_type == 'movie':
+             movie()
+         elif get_id.content_type == 'series':
+             series()
+         else:
+             ch = fzf_prompt(["movie", "series"])
+             if ch == "movie":
+                 movie()
+             elif ch == "series":
+                 series()
+             else:
+                 exit(0)
+     else:
+         ch = fzf_prompt(["movie", "series"])
+         if ch == "movie":
+             movie()
+         elif ch == "series":
+             series()
+         else:
+             exit(0)
+
+ def determine_path() -> str:
+     plt = platform.system()
+     if plt == "Windows":
+         return f"C://Users//{os.getenv('username')}//Downloads"
+     elif plt == "Linux":
+         return f"/home/{os.getlogin()}/Downloads"
+     elif plt == "Darwin":
+         return f"/Users/{os.getlogin()}/Downloads"
+     else:
+         print("[!] Make an issue for your OS.")
+         exit(0)
+
+ def dlData(path: str = determine_path()):
+     global selected_media
+     if selected_media:
+         decoded_url, subs = decode_url(selected_media['file'])
+         download(path, query, decoded_url, FLIXHQ_BASE_URL)
+     else:
+         print("No media selected for download")
+
+ def provideData():
+     global selected_media, selected_subtitles
+     if selected_media:
+         decoded_url, subs = decode_url(selected_media['file'])
+         play(decoded_url, query, FLIXHQ_BASE_URL, subs)
+     else:
+         print("No media selected for playback")
+
+ def init():
+     ch = fzf_prompt(["play", "download", "exit"])
+     if ch == "play":
+         provideData()
+     elif ch == "download":
+         dlData()
+     else:
+         exit(0)
+
+ if len(sys.argv) == 1:
+     query = input("Search: ")
+     if query == "":
+         print("ValueError: no query parameter provided")
+         exit(0)
+ else:
+     query = " ".join(sys.argv[1:])
+
+ get_id(query)
+ poison()
+ init()
+
@@ -0,0 +1 @@
+ __core__ = "1.6.4"
@@ -1,4 +1,4 @@
- import platform
+ import platform as plt
  import subprocess
 
  MPV_EXECUTABLE = "mpv"
@@ -7,7 +7,7 @@ IINA_EXECUTABLE = "iina"
 
  def play(file, name, referer, subtitles):
      try:
-         if(platform.system() == "Linux" or platform.system() == "Windows"):
+         if(plt.system() == 'Linux' or plt.system() == 'Windows'):
              args = [
                  MPV_EXECUTABLE,
                  file,
@@ -20,7 +20,7 @@ def play(file, name, referer, subtitles):
 
              mpv_process.wait()
 
-         elif(platform.system() == "Darwin"):
+         elif(plt.system() == 'Darwin'):
              args = [
                  IINA_EXECUTABLE,
                  "--no-stdin",
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
  [tool.poetry]
  name = "flix-cli"
- version = "1.6.2"
+ version = "1.6.4"
  description = "A high efficient, powerful and fast movie scraper."
  authors = ["DemonKingSwarn <rockingswarn@gmail.com>"]
  license = "GPLv3"
@@ -12,11 +12,11 @@ readme = "readme.txt"
 
  [tool.poetry.dependencies]
  python = "^3.10"
- httpx = "0.23.0"
+ httpx = "0.28.1"
  pycryptodomex = "3.14.1"
  beautifulsoup4 = "4.10.0"
  colorama = "0.4.5"
- krfzf-py = "^0.0.4"
+ krfzf-py = "0.0.4"
 
  [tool.poetry.dev-dependencies]
 
@@ -1,270 +0,0 @@
- import httpx
- from Cryptodome.Cipher import AES
- import re
- from fzf import fzf_prompt
-
- import base64
- import subprocess
- import platform
- import os
-
- from .utils.__player__ import play
- from .utils.__downloader__ import download
-
- try:
-     import orjson as json
- except ImportError:
-     import json
-
-
- #from colorama import Fore, Style
- import sys
-
-
- def pad(data):
-     return data + chr(len(data) % 16) * (16 - len(data) % 16)
-
-
- def aes_encrypt(data: str, *, key, iv):
-     return base64.b64encode(
-         AES.new(key, AES.MODE_CBC, iv=iv).encrypt(pad(data).encode())
-     )
-
-
- def aes_decrypt(data: str, *, key, iv):
-     return (
-         AES.new(key, AES.MODE_CBC, iv=iv)
-         .decrypt(base64.b64decode(data))
-         .strip(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10")
-     )
-
-
- headers = {"User-Agent": "flix-cli/1.5.7"}
-
- client = httpx.Client(headers=headers, follow_redirects=True, timeout=None)
- #cyan = lambda a: f"{Fore.CYAN}{a}{Style.RESET_ALL}"
-
- SECRET = b"25742532592138496744665879883281"
- IV = b"9225679083961858"
-
- ENCRYPT_AJAX_ENDPOINT = "https://membed1.com/encrypt-ajax.php"
-
- DEFAULT_MEDIA_REFERER = "https://membed1.com"
-
- GDRIVE_PLAYER_M_ENDPOINT = "https://databasegdriveplayer.xyz/player.php"
- GDRIVE_PLAYER_S_ENDPOINT = "https://databasegdriveplayer.xyz/player.php?type=series"
-
- CONTENT_ID_REGEX = re.compile(r"streaming\.php\?id=([^&?/#]+)")
-
- #r = client.get(GDRIVE_PLAYER_ENDPOINT)
-
- #link = "https://" + re.findall(r'<a href="(.*?)"',r.text)[2]
-
- #yarl query to get id from link
- #id = yarl.URL(link).query.get('id')
-
- def movie():
-
-     content_id = CONTENT_ID_REGEX.search(
-         client.get(
-             GDRIVE_PLAYER_M_ENDPOINT,
-             params={
-                 "imdb": get_id.imdb_ids[get_id.c-1],
-             },
-         ).text
-     ).group(1)
-
-
-     content = json.loads(
-         aes_decrypt(
-             json.loads(
-                 httpx.get(
-                     ENCRYPT_AJAX_ENDPOINT,
-                     params={"id": aes_encrypt(content_id, key=SECRET, iv=IV).decode()},
-                     headers={"x-requested-with": "XMLHttpRequest"},
-                 ).text
-             )["data"],
-             key=SECRET,
-             iv=IV,
-         )
-     )
-
-     #print(content)
-
-     movie.subtitles = (_.get("file") for _ in content.get("track", {}).get("tracks", []))
-
-     media = (content.get("source", []) or []) + (content.get("source_bk", []) or [])
-
-     if not media:
-         raise RuntimeError("Could not find any media for playback.")
-
-     if len(media) > 2:
-         for content_index, source in enumerate(media):
-             if(content_index+1 != len(media)):
-                 print(f" > {content_index+1} / {source['label']} / {source['type']}")
-         try:
-             while not (
-                 (user_selection := input("Take it or leave it, index: ")).isdigit()
-                 and (parsed_us := int(user_selection)-1) in range(content_index)
-             ):
-                 print("Nice joke. Now you have to TRY AGAIN!!!")
-             movie.selected = media[parsed_us]
-         except KeyboardInterrupt:
-             exit(0)
-     else:
-         movie.selected = media[0]
-
- def series():
-     season = input("Enter season: ")
-     episode = input("Enter episode: ")
-
-
-     content_id = CONTENT_ID_REGEX.search(
-         client.get(
-             GDRIVE_PLAYER_S_ENDPOINT,
-             params={
-                 "imdb": get_id.imdb_ids[get_id.c-1],
-                 "season": season,
-                 "episode": episode,
-             },
-         ).text
-     ).group(1)
-
-     content = json.loads(
-         aes_decrypt(
-             json.loads(
-                 httpx.get(
-                     ENCRYPT_AJAX_ENDPOINT,
-                     params={"id": aes_encrypt(content_id, key=SECRET, iv=IV).decode()},
-                     headers={"x-requested-with": "XMLHttpRequest"},
-                 ).text
-             )["data"],
-             key=SECRET,
-             iv=IV,
-         )
-     )
-
-
-     series.subtitles = (_.get("file") for _ in content.get("track", {}).get("tracks", []))
-
-     media = (content.get("source", []) or []) + (content.get("source_bk", []) or [])
-
-     if not media:
-         raise RuntimeError("Could not find any media for playback.")
-
-     if len(media) > 2:
-         for content_index, source in enumerate(media):
-             if(content_index+1 != len(media)):
-                 print(f" > {content_index+1} / {source['label']} / {source['type']}")
-         try:
-             while not (
-                 (user_selection := input("Take it or leave it, index: ")).isdigit()
-                 and (parsed_us := int(user_selection)-1) in range(content_index)
-             ):
-                 print("Nice joke. Now you have to TRY AGAIN!!!")
-             series.selected = media[parsed_us]
-         except KeyboardInterrupt:
-             exit(0)
-     else:
-         series.selected = media[0]
-
-
- def get_id(query: str):
-     query = query.replace(" ","_")
-
-     url = f"https://v2.sg.media-imdb.com/suggestion/{query[0]}/{query}.json"
-
-     r=client.get(url)
-
-     get_id.imdb_ids = [i["id"] for i in r.json().get("d")]
-     names = [i["l"] for i in r.json().get("d")]
-
-     #print("[*]Results: ")
-     shows = []
-     for i in range(len(names)):
-         #print(f"{i+1}. {names[i]}")
-         #fzf_prompt(f"{i+1}. {names[i]}")
-         shows.append(f"{i+1}. {names[i]}")
-
-     uwu = fzf_prompt(shows)
-     #get_id.c = int(input(cyan("[*]Enter number: ")))
-     get_id.c = int(uwu[0])
-     print(get_id.c)
-     return get_id.imdb_ids[get_id.c-1]
-
-
- if len(sys.argv) == 1:
-     query = input("Search: ")
-     if query == "":
-         print("ValueError: no query parameter provided")
-         exit(0)
- else:
-     query = " ".join(sys.argv[1:])
-
- get_id(query)
-
- def poison():
-     #print("\nChoose your poison!!!")
-     ch = fzf_prompt(["movie", "series"])
-
-     #ch = input(": ")
-
-     if ch == "movie":
-         movie()
-     elif ch == "series":
-         series()
-     else:
-         exit(0)
-
- poison()
-
- def determine_path() -> str:
-
-     plt = platform.system()
-
-     if plt == "Windows":
-         return f"C://Users//{os.getenv('username')}//Downloads"
-
-     elif (plt == "Linux"):
-         return f"/home/{os.getlogin()}/Downloads"
-
-     elif (plt == "Darwin"):
-         return f"/Users/{os.getlogin()}/Downloads"
-
-     else:
-         print("[!] Make an issue for your OS.")
-         exit(0)
-
- def dlData(path: str = determine_path()):
-     try:
-         dl(movie.selected, path)
-     except Exception as e:
-         dl(series.selected, path)
-
- def dl(selected, path):
-     download(path, query, selected['file'], DEFAULT_MEDIA_REFERER)
-
- def provideData():
-     try:
-         launchPlayer(movie.selected, movie.subtitles)
-     except Exception as e:
-         launchPlayer(series.selected, series.subtitles)
-
- def launchPlayer(selected, subtitles):
-     play(selected['file'], query, DEFAULT_MEDIA_REFERER, subtitles)
-
-
- def init():
-     ch = fzf_prompt(["play", "download", "exit"])
-
-     if ch == "play":
-         provideData()
-     elif ch == "download":
-         dlData()
-     else:
-         exit(0)
-
-
- #main()
-
- init()
@@ -1 +0,0 @@
- __core__ = "1.6.2"
flix_cli-1.6.2/setup.py DELETED
@@ -1,38 +0,0 @@
- # -*- coding: utf-8 -*-
- from setuptools import setup
-
- packages = \
- ['flix_cli', 'flix_cli.core', 'flix_cli.core.utils']
-
- package_data = \
- {'': ['*']}
-
- install_requires = \
- ['beautifulsoup4==4.10.0',
-  'colorama==0.4.5',
-  'httpx==0.23.0',
-  'krfzf-py>=0.0.4,<0.0.5',
-  'pycryptodomex==3.14.1']
-
- entry_points = \
- {'console_scripts': ['flix-cli = flix_cli.__main__:__flixcli__']}
-
- setup_kwargs = {
-     'name': 'flix-cli',
-     'version': '1.6.2',
-     'description': 'A high efficient, powerful and fast movie scraper.',
-     'long_description': '',
-     'author': 'DemonKingSwarn',
-     'author_email': 'rockingswarn@gmail.com',
-     'maintainer': 'None',
-     'maintainer_email': 'None',
-     'url': 'None',
-     'packages': packages,
-     'package_data': package_data,
-     'install_requires': install_requires,
-     'entry_points': entry_points,
-     'python_requires': '>=3.10,<4.0',
- }
-
-
- setup(**setup_kwargs)