flix-cli 1.6.4__tar.gz → 1.6.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: flix-cli
-Version: 1.6.4
+Version: 1.6.6
 Summary: A high efficient, powerful and fast movie scraper.
 License: GPLv3
 Author: DemonKingSwarn
@@ -13,10 +13,9 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: beautifulsoup4 (==4.10.0)
-Requires-Dist: colorama (==0.4.5)
 Requires-Dist: httpx (==0.28.1)
 Requires-Dist: krfzf-py (==0.0.4)
-Requires-Dist: pycryptodomex (==3.14.1)
+Requires-Dist: regex (==2025.9.1)
 Description-Content-Type: text/plain
 
 
@@ -19,6 +19,7 @@ except ImportError:
 import sys
 from urllib.parse import urljoin, quote
 import time
+
 from bs4 import BeautifulSoup
 
 headers = {
@@ -62,7 +63,6 @@ def decode_url(url: str):
                 subtitles.append(track['file'])
         return video_link, subtitles
 
-    # Try other common fields
    for key in ['link', 'url', 'file']:
        if key in data and data[key]:
            return data[key], []
@@ -157,7 +157,6 @@ def get_tv_seasons(media_id: str):
        print(f"Debug: Seasons response status: {response.status_code}")
 
        if response.status_code == 200:
-            # Parse like lobster: extract season title and ID from href
            season_pattern = re.compile(r'href="[^"]*-(\d+)"[^>]*>([^<]*)</a>')
            matches = season_pattern.findall(response.text)
 
@@ -187,8 +186,6 @@ def get_season_episodes(season_id: str):
        print(f"Debug: Episodes response status: {response.status_code}")
 
        if response.status_code == 200:
-            # Parse like lobster: look for data-id and title in nav-item elements
-            # First, split by class="nav-item" like lobster does
            content = response.text.replace('\n', '').replace('class="nav-item"', '\nclass="nav-item"')
 
            episode_pattern = re.compile(r'data-id="(\d+)"[^>]*title="([^"]*)"')
@@ -220,7 +217,6 @@ def get_episode_servers(data_id: str, preferred_provider: str = "Vidcloud"):
        print(f"Debug: Servers response status: {response.status_code}")
 
        if response.status_code == 200:
-            # Parse like lobster: look for data-id and title in nav-item elements
            content = response.text.replace('\n', '').replace('class="nav-item"', '\nclass="nav-item"')
 
            server_pattern = re.compile(r'data-id="(\d+)"[^>]*title="([^"]*)"')
@@ -234,13 +230,11 @@ def get_episode_servers(data_id: str, preferred_provider: str = "Vidcloud"):
                })
                print(f"Debug: Found server: {server_name.strip()} (ID: {server_id})")
 
-            # Find preferred provider like lobster does
            for server in servers:
                if preferred_provider.lower() in server['name'].lower():
                    print(f"Debug: Selected {preferred_provider} server: {server['id']}")
                    return server['id']
 
-            # Fallback to first server
            if servers:
                print(f"Debug: Using fallback server: {servers[0]['id']}")
                return servers[0]['id']
@@ -261,7 +255,6 @@ def get_embed_link(episode_id: str):
        print(f"Debug: Sources response status: {response.status_code}")
 
        if response.status_code == 200:
-            # Extract like lobster: look for "link" in JSON response
            link_match = re.search(r'"link":"([^"]*)"', response.text)
            if link_match:
                embed_link = link_match.group(1)
@@ -278,7 +271,6 @@ def movie():
    """Handle movie streaming"""
    global selected_media, selected_subtitles
 
-    # Extract media ID from URL
    media_id_match = re.search(r'/movie/[^/]*-(\d+)', get_id.selected_url)
    if not media_id_match:
        raise RuntimeError("Could not extract media ID from URL")
@@ -286,22 +278,18 @@ def movie():
    media_id = media_id_match.group(1)
    print(f"Debug: Movie media ID: {media_id}")
 
-    # For movies, use the movie/episodes endpoint like lobster
    try:
        movie_episodes_url = f"{FLIXHQ_AJAX_URL}/movie/episodes/{media_id}"
        response = client.get(movie_episodes_url)
 
        if response.status_code == 200:
-            # Extract like lobster: find href with provider name
            content = response.text.replace('\n', '').replace('class="nav-item"', '\nclass="nav-item"')
 
-            # Look for Vidcloud provider first
            provider_pattern = re.compile(r'href="([^"]*)"[^>]*title="Vidcloud"')
            match = provider_pattern.search(content)
 
            if match:
                movie_page_url = FLIXHQ_BASE_URL + match.group(1)
-                # Extract episode ID like lobster: -(\d+).(\d+)$ -> take the second number
                episode_match = re.search(r'-(\d+)\.(\d+)$', movie_page_url)
                if episode_match:
                    episode_id = episode_match.group(2)
@@ -324,7 +312,7 @@ def movie():
    raise RuntimeError("Could not get movie stream")
 
 def series():
-    """Handle series streaming using lobster's exact approach"""
+    """Handle series streaming"""
    global selected_media, selected_subtitles
 
    season = input("Enter season: ")
@@ -337,7 +325,6 @@ def series():
        print("Invalid season or episode number")
        raise RuntimeError("Invalid season or episode number")
 
-    # Extract media ID from URL
    media_id_match = re.search(r'/tv/[^/]*-(\d+)', get_id.selected_url)
    if not media_id_match:
        raise RuntimeError("Could not extract media ID from URL")
@@ -345,12 +332,10 @@ def series():
    media_id = media_id_match.group(1)
    print(f"Debug: TV media ID: {media_id}")
 
-    # Step 1: Get seasons
    seasons = get_tv_seasons(media_id)
    if not seasons:
        raise RuntimeError("Could not get seasons")
 
-    # Step 2: Find the target season (try exact match first, then positional)
    target_season_id = None
    for season_data in seasons:
        season_title = season_data['title'].lower()
@@ -358,7 +343,6 @@ def series():
            target_season_id = season_data['id']
            break
 
-    # Fallback: assume seasons are in order
    if not target_season_id and season_num <= len(seasons):
        target_season_id = seasons[season_num - 1]['id']
 
@@ -367,24 +351,20 @@ def series():
 
    print(f"Debug: Target season ID: {target_season_id}")
 
-    # Step 3: Get episodes for this season
    episodes = get_season_episodes(target_season_id)
    if not episodes:
        raise RuntimeError(f"Could not get episodes for season {season_num}")
 
-    # Step 4: Find the target episode (assume episodes are in order)
    if episode_num > len(episodes):
        raise RuntimeError(f"Episode {episode_num} not found (only {len(episodes)} episodes available)")
 
-    target_episode = episodes[episode_num - 1]  # Episodes are 1-indexed
+    target_episode = episodes[episode_num - 1]
    print(f"Debug: Target episode: {target_episode['title']} (data-id: {target_episode['data_id']})")
 
-    # Step 5: Get episode servers and select Vidcloud
    episode_id = get_episode_servers(target_episode['data_id'], "Vidcloud")
    if not episode_id:
        raise RuntimeError("Could not get episode server ID")
 
-    # Step 6: Get embed link
    embed_link = get_embed_link(episode_id)
    if not embed_link:
        raise RuntimeError("Could not get embed link")
@@ -405,7 +385,6 @@ def get_id(query: str):
 
    get_id.selected_url = selected_url
 
-    # Determine content type from URL
    if '/movie/' in selected_url:
        get_id.content_type = 'movie'
    elif '/tv/' in selected_url:
@@ -443,7 +422,7 @@ def determine_path() -> str:
    plt = platform.system()
    if plt == "Windows":
        return f"C://Users//{os.getenv('username')}//Downloads"
-    elif plt == "Linux":
+    elif plt == "Linux" or plt == "FreeBSD":
        return f"/home/{os.getlogin()}/Downloads"
    elif plt == "Darwin":
        return f"/Users/{os.getlogin()}/Downloads"
@@ -0,0 +1 @@
+__core__ = "1.6.6"
@@ -7,7 +7,7 @@ IINA_EXECUTABLE = "iina"
 
 def play(file, name, referer, subtitles):
    try:
-        if(plt.system() == 'Linux' or plt.system() == 'Windows'):
+        if(plt.system() == 'Linux' or plt.system() == 'Windows' or plt.system() == 'FreeBSD'):
            args = [
                MPV_EXECUTABLE,
                file,
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "flix-cli"
-version = "1.6.4"
+version = "1.6.6"
 description = "A high efficient, powerful and fast movie scraper."
 authors = ["DemonKingSwarn <rockingswarn@gmail.com>"]
 license = "GPLv3"
@@ -13,9 +13,8 @@ readme = "readme.txt"
 [tool.poetry.dependencies]
 python = "^3.10"
 httpx = "0.28.1"
-pycryptodomex = "3.14.1"
 beautifulsoup4 = "4.10.0"
-colorama = "0.4.5"
+regex = "2025.9.1"
 krfzf-py = "0.0.4"
 
 [tool.poetry.dev-dependencies]
@@ -1 +0,0 @@
-__core__ = "1.6.4"
4 files without changes