quasarr 2.4.8-py3-none-any.whl → 2.4.9-py3-none-any.whl

This diff reflects the changes between package versions that have been publicly released to one of the supported registries, as they appear in those registries. It is provided for informational purposes only.
Files changed (76)
  1. quasarr/__init__.py +134 -70
  2. quasarr/api/__init__.py +40 -31
  3. quasarr/api/arr/__init__.py +116 -108
  4. quasarr/api/captcha/__init__.py +262 -137
  5. quasarr/api/config/__init__.py +76 -46
  6. quasarr/api/packages/__init__.py +138 -102
  7. quasarr/api/sponsors_helper/__init__.py +29 -16
  8. quasarr/api/statistics/__init__.py +19 -19
  9. quasarr/downloads/__init__.py +165 -72
  10. quasarr/downloads/linkcrypters/al.py +35 -18
  11. quasarr/downloads/linkcrypters/filecrypt.py +107 -52
  12. quasarr/downloads/linkcrypters/hide.py +5 -6
  13. quasarr/downloads/packages/__init__.py +342 -177
  14. quasarr/downloads/sources/al.py +191 -100
  15. quasarr/downloads/sources/by.py +31 -13
  16. quasarr/downloads/sources/dd.py +27 -14
  17. quasarr/downloads/sources/dj.py +1 -3
  18. quasarr/downloads/sources/dl.py +126 -71
  19. quasarr/downloads/sources/dt.py +11 -5
  20. quasarr/downloads/sources/dw.py +28 -14
  21. quasarr/downloads/sources/he.py +32 -24
  22. quasarr/downloads/sources/mb.py +19 -9
  23. quasarr/downloads/sources/nk.py +14 -10
  24. quasarr/downloads/sources/nx.py +8 -18
  25. quasarr/downloads/sources/sf.py +45 -20
  26. quasarr/downloads/sources/sj.py +1 -3
  27. quasarr/downloads/sources/sl.py +9 -5
  28. quasarr/downloads/sources/wd.py +32 -12
  29. quasarr/downloads/sources/wx.py +35 -21
  30. quasarr/providers/auth.py +42 -37
  31. quasarr/providers/cloudflare.py +28 -30
  32. quasarr/providers/hostname_issues.py +2 -1
  33. quasarr/providers/html_images.py +2 -2
  34. quasarr/providers/html_templates.py +22 -14
  35. quasarr/providers/imdb_metadata.py +149 -80
  36. quasarr/providers/jd_cache.py +131 -39
  37. quasarr/providers/log.py +1 -1
  38. quasarr/providers/myjd_api.py +260 -196
  39. quasarr/providers/notifications.py +53 -41
  40. quasarr/providers/obfuscated.py +9 -4
  41. quasarr/providers/sessions/al.py +71 -55
  42. quasarr/providers/sessions/dd.py +21 -14
  43. quasarr/providers/sessions/dl.py +30 -19
  44. quasarr/providers/sessions/nx.py +23 -14
  45. quasarr/providers/shared_state.py +292 -141
  46. quasarr/providers/statistics.py +75 -43
  47. quasarr/providers/utils.py +33 -27
  48. quasarr/providers/version.py +45 -14
  49. quasarr/providers/web_server.py +10 -5
  50. quasarr/search/__init__.py +30 -18
  51. quasarr/search/sources/al.py +124 -73
  52. quasarr/search/sources/by.py +110 -59
  53. quasarr/search/sources/dd.py +57 -35
  54. quasarr/search/sources/dj.py +69 -48
  55. quasarr/search/sources/dl.py +159 -100
  56. quasarr/search/sources/dt.py +110 -74
  57. quasarr/search/sources/dw.py +121 -61
  58. quasarr/search/sources/fx.py +108 -62
  59. quasarr/search/sources/he.py +78 -49
  60. quasarr/search/sources/mb.py +96 -48
  61. quasarr/search/sources/nk.py +80 -50
  62. quasarr/search/sources/nx.py +91 -62
  63. quasarr/search/sources/sf.py +171 -106
  64. quasarr/search/sources/sj.py +69 -48
  65. quasarr/search/sources/sl.py +115 -71
  66. quasarr/search/sources/wd.py +67 -44
  67. quasarr/search/sources/wx.py +188 -123
  68. quasarr/storage/config.py +65 -52
  69. quasarr/storage/setup.py +238 -140
  70. quasarr/storage/sqlite_database.py +10 -4
  71. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/METADATA +2 -2
  72. quasarr-2.4.9.dist-info/RECORD +81 -0
  73. quasarr-2.4.8.dist-info/RECORD +0 -81
  74. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/WHEEL +0 -0
  75. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/entry_points.txt +0 -0
  76. {quasarr-2.4.8.dist-info → quasarr-2.4.9.dist-info}/licenses/LICENSE +0 -0
@@ -24,7 +24,7 @@ from quasarr.downloads.sources.sj import get_sj_download_links
  from quasarr.downloads.sources.sl import get_sl_download_links
  from quasarr.downloads.sources.wd import get_wd_download_links
  from quasarr.downloads.sources.wx import get_wx_download_links
- from quasarr.providers.hostname_issues import mark_hostname_issue, clear_hostname_issue
+ from quasarr.providers.hostname_issues import clear_hostname_issue, mark_hostname_issue
  from quasarr.providers.log import info
  from quasarr.providers.notifications import send_discord_message
  from quasarr.providers.statistics import StatsHelper
@@ -36,35 +36,35 @@ from quasarr.providers.utils import filter_offline_links

  # Patterns match crypter name only - TLDs may change
  AUTO_DECRYPT_PATTERNS = {
- 'hide': re.compile(r'hide\.', re.IGNORECASE),
+ "hide": re.compile(r"hide\.", re.IGNORECASE),
  }

  PROTECTED_PATTERNS = {
- 'filecrypt': re.compile(r'filecrypt\.', re.IGNORECASE),
- 'tolink': re.compile(r'tolink\.', re.IGNORECASE),
- 'keeplinks': re.compile(r'keeplinks\.', re.IGNORECASE),
+ "filecrypt": re.compile(r"filecrypt\.", re.IGNORECASE),
+ "tolink": re.compile(r"tolink\.", re.IGNORECASE),
+ "keeplinks": re.compile(r"keeplinks\.", re.IGNORECASE),
  }

  # Source key -> getter function mapping
  # All getters have signature: (shared_state, url, mirror, title, password)
  # AL uses password as release_id, others ignore it
  SOURCE_GETTERS = {
- 'al': get_al_download_links,
- 'by': get_by_download_links,
- 'dd': get_dd_download_links,
- 'dj': get_dj_download_links,
- 'dl': get_dl_download_links,
- 'dt': get_dt_download_links,
- 'dw': get_dw_download_links,
- 'he': get_he_download_links,
- 'mb': get_mb_download_links,
- 'nk': get_nk_download_links,
- 'nx': get_nx_download_links,
- 'sf': get_sf_download_links,
- 'sj': get_sj_download_links,
- 'sl': get_sl_download_links,
- 'wd': get_wd_download_links,
- 'wx': get_wx_download_links,
+ "al": get_al_download_links,
+ "by": get_by_download_links,
+ "dd": get_dd_download_links,
+ "dj": get_dj_download_links,
+ "dl": get_dl_download_links,
+ "dt": get_dt_download_links,
+ "dw": get_dw_download_links,
+ "he": get_he_download_links,
+ "mb": get_mb_download_links,
+ "nk": get_nk_download_links,
+ "nx": get_nx_download_links,
+ "sf": get_sf_download_links,
+ "sj": get_sj_download_links,
+ "sl": get_sl_download_links,
+ "wd": get_wd_download_links,
+ "wx": get_wx_download_links,
  }


@@ -72,6 +72,7 @@ SOURCE_GETTERS = {
  # DETERMINISTIC PACKAGE ID GENERATION
  # =============================================================================

+
  def extract_client_type(request_from):
  """
  Extract client type from User-Agent, stripping version info.
@@ -85,15 +86,15 @@ def extract_client_type(request_from):
  return "unknown"

  # Extract the client name before the version (first part before '/')
- client = request_from.split('/')[0].lower().strip()
+ client = request_from.split("/")[0].lower().strip()

  # Normalize known clients
- if 'radarr' in client:
- return 'radarr'
- elif 'sonarr' in client:
- return 'sonarr'
- elif 'lazylibrarian' in client:
- return 'lazylibrarian'
+ if "radarr" in client:
+ return "radarr"
+ elif "sonarr" in client:
+ return "sonarr"
+ elif "lazylibrarian" in client:
+ return "lazylibrarian"

  return client

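For reference, a minimal standalone sketch of the User-Agent normalization touched above: split on the first "/", lowercase, then map known clients. The empty-value guard and the sample User-Agent strings are assumptions for illustration, not part of the diff:

    # Sketch only - mirrors the normalization shown in the hunk above.
    def extract_client_type(request_from):
        if not request_from:  # guard condition assumed
            return "unknown"
        client = request_from.split("/")[0].lower().strip()
        if "radarr" in client:
            return "radarr"
        elif "sonarr" in client:
            return "sonarr"
        elif "lazylibrarian" in client:
            return "lazylibrarian"
        return client

    extract_client_type("Radarr/5.14.0")       # hypothetical input -> "radarr"
    extract_client_type("LazyLibrarian/1.11")  # hypothetical input -> "lazylibrarian"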
@@ -119,16 +120,12 @@ def generate_deterministic_package_id(title, source_key, client_type):
  normalized_client = client_type.lower().strip() if client_type else "unknown"

  # Category mapping (for compatibility with existing package ID format)
- category_map = {
- "lazylibrarian": "docs",
- "radarr": "movies",
- "sonarr": "tv"
- }
+ category_map = {"lazylibrarian": "docs", "radarr": "movies", "sonarr": "tv"}
  category = category_map.get(normalized_client, "tv")

  # Create deterministic hash from combination using SHA256
  hash_input = f"{normalized_title}|{normalized_source}|{normalized_client}"
- hash_bytes = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+ hash_bytes = hashlib.sha256(hash_input.encode("utf-8")).hexdigest()

  # Use first 32 characters for good collision resistance (128-bit)
  return f"Quasarr_{category}_{hash_bytes[:32]}"
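The hunk above only reflows formatting, but it exposes the whole ID scheme: normalize the inputs, map the client to a category, hash the joined triple with SHA256, and keep the first 32 hex characters. A self-contained sketch, with the title/source normalization assumed to mirror the client handling:

    import hashlib

    def deterministic_package_id(title, source_key, client_type):
        # Title/source normalization is assumed (lower + strip); the rest follows the diff.
        normalized_title = title.lower().strip()
        normalized_source = source_key.lower().strip()
        normalized_client = client_type.lower().strip() if client_type else "unknown"

        category_map = {"lazylibrarian": "docs", "radarr": "movies", "sonarr": "tv"}
        category = category_map.get(normalized_client, "tv")

        # SHA256 over the joined triple; the first 32 hex chars give a 128-bit identifier.
        hash_input = f"{normalized_title}|{normalized_source}|{normalized_client}"
        digest = hashlib.sha256(hash_input.encode("utf-8")).hexdigest()
        return f"Quasarr_{category}_{digest[:32]}"

Because the same title, source, and client always produce the same ID, repeated grabs of one release can be recognized via package_id_exists() instead of creating duplicate packages.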
@@ -138,14 +135,15 @@ def generate_deterministic_package_id(title, source_key, client_type):
  # LINK CLASSIFICATION
  # =============================================================================

+
  def detect_crypter(url):
  """Returns (crypter_name, 'auto'|'protected') or (None, None)."""
  for name, pattern in AUTO_DECRYPT_PATTERNS.items():
  if pattern.search(url):
- return name, 'auto'
+ return name, "auto"
  for name, pattern in PROTECTED_PATTERNS.items():
  if pattern.search(url):
- return name, 'protected'
+ return name, "protected"
  return None, None

@@ -163,23 +161,23 @@ def classify_links(links, shared_state):
  Direct = anything that's not a known crypter or junkies link.
  Mirror names from source are preserved.
  """
- classified = {'direct': [], 'auto': [], 'protected': []}
+ classified = {"direct": [], "auto": [], "protected": []}

  for link in links:
  url = link[0]

  if is_junkies_link(url, shared_state):
- classified['protected'].append(link)
+ classified["protected"].append(link)
  continue

  crypter, crypter_type = detect_crypter(url)
- if crypter_type == 'auto':
- classified['auto'].append(link)
- elif crypter_type == 'protected':
- classified['protected'].append(link)
+ if crypter_type == "auto":
+ classified["auto"].append(link)
+ elif crypter_type == "protected":
+ classified["protected"].append(link)
  else:
  # Not a known crypter = direct hoster link
- classified['direct'].append(link)
+ classified["direct"].append(link)

  return classified

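A minimal sketch of how detect_crypter buckets URLs under the patterns defined earlier in this file; the example URLs are hypothetical placeholders:

    import re

    AUTO_DECRYPT_PATTERNS = {"hide": re.compile(r"hide\.", re.IGNORECASE)}
    PROTECTED_PATTERNS = {
        "filecrypt": re.compile(r"filecrypt\.", re.IGNORECASE),
        "tolink": re.compile(r"tolink\.", re.IGNORECASE),
        "keeplinks": re.compile(r"keeplinks\.", re.IGNORECASE),
    }

    def detect_crypter(url):
        """Returns (crypter_name, 'auto'|'protected') or (None, None)."""
        for name, pattern in AUTO_DECRYPT_PATTERNS.items():
            if pattern.search(url):
                return name, "auto"
        for name, pattern in PROTECTED_PATTERNS.items():
            if pattern.search(url):
                return name, "protected"
        return None, None  # unknown crypter -> treated as a direct hoster link

    detect_crypter("https://hide.example/container")       # ("hide", "auto")
    detect_crypter("https://filecrypt.example/Container")  # ("filecrypt", "protected")
    detect_crypter("https://hoster.example/file/123")      # (None, None) -> direct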
@@ -188,6 +186,7 @@ def classify_links(links, shared_state):
  # LINK PROCESSING
  # =============================================================================

+
  def handle_direct_links(shared_state, links, title, password, package_id):
  """Send direct hoster links to JDownloader."""
  urls = [link[0] for link in links]
@@ -196,7 +195,10 @@ def handle_direct_links(shared_state, links, title, password, package_id):
  if shared_state.download_package(urls, title, password, package_id):
  StatsHelper(shared_state).increment_package_with_links(urls)
  return {"success": True}
- return {"success": False, "reason": f'Failed to add {len(urls)} links to linkgrabber'}
+ return {
+ "success": False,
+ "reason": f"Failed to add {len(urls)} links to linkgrabber",
+ }


  def handle_auto_decrypt_links(shared_state, links, title, password, package_id):
@@ -218,25 +220,50 @@ def handle_auto_decrypt_links(shared_state, links, title, password, package_id):
  return {"success": False, "reason": "Failed to add decrypted links to linkgrabber"}


- def store_protected_links(shared_state, links, title, password, package_id, size_mb=None, original_url=None):
+ def store_protected_links(
+ shared_state, links, title, password, package_id, size_mb=None, original_url=None
+ ):
  """Store protected links for CAPTCHA UI."""
- blob_data = {"title": title, "links": links, "password": password, "size_mb": size_mb}
+ blob_data = {
+ "title": title,
+ "links": links,
+ "password": password,
+ "size_mb": size_mb,
+ }
  if original_url:
  blob_data["original_url"] = original_url

- shared_state.values["database"]("protected").update_store(package_id, json.dumps(blob_data))
- info(f'CAPTCHA-Solution required for "{title}" at: "{shared_state.values["external_address"]}/captcha"')
+ shared_state.values["database"]("protected").update_store(
+ package_id, json.dumps(blob_data)
+ )
+ info(
+ f'CAPTCHA-Solution required for "{title}" at: "{shared_state.values["external_address"]}/captcha"'
+ )
  return {"success": True}


- def process_links(shared_state, source_result, title, password, package_id, imdb_id, source_url, size_mb, label):
+ def process_links(
+ shared_state,
+ source_result,
+ title,
+ password,
+ package_id,
+ imdb_id,
+ source_url,
+ size_mb,
+ label,
+ ):
  """
  Central link processor with priority: direct → auto-decrypt → protected.
  If ANY direct links exist, use them and ignore crypted fallbacks.
  """
  if not source_result:
- return fail(title, package_id, shared_state,
- reason=f'Source returned no data for "{title}" on {label} - "{source_url}"')
+ return fail(
+ title,
+ package_id,
+ shared_state,
+ reason=f'Source returned no data for "{title}" on {label} - "{source_url}"',
+ )

  links = source_result.get("links", [])
  password = source_result.get("password") or password
@@ -244,59 +271,105 @@ def process_links(shared_state, source_result, title, password, package_id, imdb
  title = source_result.get("title") or title

  if not links:
- return fail(title, package_id, shared_state,
- reason=f'No links found for "{title}" on {label} - "{source_url}"')
+ return fail(
+ title,
+ package_id,
+ shared_state,
+ reason=f'No links found for "{title}" on {label} - "{source_url}"',
+ )

  # Filter out 404 links
  valid_links = [link for link in links if "/404.html" not in link[0]]
  if not valid_links:
- return fail(title, package_id, shared_state,
- reason=f'All links are offline or IP is banned for "{title}" on {label} - "{source_url}"')
+ return fail(
+ title,
+ package_id,
+ shared_state,
+ reason=f'All links are offline or IP is banned for "{title}" on {label} - "{source_url}"',
+ )
  links = valid_links

  # Filter out verifiably offline links
  links = filter_offline_links(links, shared_state=shared_state, log_func=info)
  if not links:
- return fail(title, package_id, shared_state,
- reason=f'All verifiable links are offline for "{title}" on {label} - "{source_url}"')
+ return fail(
+ title,
+ package_id,
+ shared_state,
+ reason=f'All verifiable links are offline for "{title}" on {label} - "{source_url}"',
+ )

  classified = classify_links(links, shared_state)

  # PRIORITY 1: Direct hoster links
- if classified['direct']:
+ if classified["direct"]:
  info(f"Found {len(classified['direct'])} direct hoster links for {title}")
- send_discord_message(shared_state, title=title, case="unprotected", imdb_id=imdb_id, source=source_url)
- result = handle_direct_links(shared_state, classified['direct'], title, password, package_id)
+ send_discord_message(
+ shared_state,
+ title=title,
+ case="unprotected",
+ imdb_id=imdb_id,
+ source=source_url,
+ )
+ result = handle_direct_links(
+ shared_state, classified["direct"], title, password, package_id
+ )
  if result["success"]:
  return {"success": True, "title": title}
  return fail(title, package_id, shared_state, reason=result.get("reason"))

  # PRIORITY 2: Auto-decryptable (hide.cx)
- if classified['auto']:
+ if classified["auto"]:
  info(f"Found {len(classified['auto'])} auto-decryptable links for {title}")
- result = handle_auto_decrypt_links(shared_state, classified['auto'], title, password, package_id)
+ result = handle_auto_decrypt_links(
+ shared_state, classified["auto"], title, password, package_id
+ )
  if result["success"]:
- send_discord_message(shared_state, title=title, case="unprotected", imdb_id=imdb_id, source=source_url)
+ send_discord_message(
+ shared_state,
+ title=title,
+ case="unprotected",
+ imdb_id=imdb_id,
+ source=source_url,
+ )
  return {"success": True, "title": title}
  info(f"Auto-decrypt failed for {title}, falling back to manual CAPTCHA...")
- classified['protected'].extend(classified['auto'])
+ classified["protected"].extend(classified["auto"])

  # PRIORITY 3: Protected (filecrypt, tolink, keeplinks, junkies)
- if classified['protected']:
+ if classified["protected"]:
  info(f"Found {len(classified['protected'])} protected links for {title}")
- send_discord_message(shared_state, title=title, case="captcha", imdb_id=imdb_id, source=source_url)
- store_protected_links(shared_state, classified['protected'], title, password, package_id,
- size_mb=size_mb, original_url=source_url)
+ send_discord_message(
+ shared_state,
+ title=title,
+ case="captcha",
+ imdb_id=imdb_id,
+ source=source_url,
+ )
+ store_protected_links(
+ shared_state,
+ classified["protected"],
+ title,
+ password,
+ package_id,
+ size_mb=size_mb,
+ original_url=source_url,
+ )
  return {"success": True, "title": title}

- return fail(title, package_id, shared_state,
- reason=f'No usable links found for "{title}" on {label} - "{source_url}"')
+ return fail(
+ title,
+ package_id,
+ shared_state,
+ reason=f'No usable links found for "{title}" on {label} - "{source_url}"',
+ )


  # =============================================================================
  # MAIN ENTRY POINT
  # =============================================================================

+
  def package_id_exists(shared_state, package_id):
  # DB checks
  if shared_state.get_db("protected").retrieve(package_id):
@@ -314,7 +387,17 @@ def package_id_exists(shared_state, package_id):
  return False


- def download(shared_state, request_from, title, url, mirror, size_mb, password, imdb_id=None, source_key=None):
+ def download(
+ shared_state,
+ request_from,
+ title,
+ url,
+ mirror,
+ size_mb,
+ password,
+ imdb_id=None,
+ source_key=None,
+ ):
  """
  Main download entry point.

@@ -384,7 +467,17 @@ def download(shared_state, request_from, title, url, mirror, size_mb, password,
  StatsHelper(shared_state).increment_failed_downloads()
  return {"success": False, "package_id": package_id, "title": title}

- result = process_links(shared_state, source_result, title, password, package_id, imdb_id, url, size_mb, label)
+ result = process_links(
+ shared_state,
+ source_result,
+ title,
+ password,
+ package_id,
+ imdb_id,
+ url,
+ size_mb,
+ label,
+ )
  return {"package_id": package_id, **result}

@@ -8,7 +8,7 @@ from io import BytesIO
  from Cryptodome.Cipher import AES
  from PIL import Image, ImageChops

- from quasarr.providers.log import info, debug
+ from quasarr.providers.log import debug, info
  from quasarr.providers.utils import is_flaresolverr_available

@@ -107,7 +107,9 @@ def decrypt_content(content_items: list[dict], mirror: str | None) -> list[str]:
  filtered = []

  if mirror and not filtered:
- info(f"No items found for mirror='{mirror}'. Falling back to all content_items.")
+ info(
+ f"No items found for mirror='{mirror}'. Falling back to all content_items."
+ )
  filtered = content_items.copy()

  if not mirror:
@@ -120,7 +122,9 @@ def decrypt_content(content_items: list[dict], mirror: str | None) -> list[str]:

  for idx, item in enumerate(items_to_process):
  if not isinstance(item, dict):
- info(f"[Item {idx}] Invalid item format; expected dict, got {type(item).__name__}")
+ info(
+ f"[Item {idx}] Invalid item format; expected dict, got {type(item).__name__}"
+ )
  continue

  hoster_name = item.get("hoster", "<unknown>")
@@ -129,7 +133,9 @@ def decrypt_content(content_items: list[dict], mirror: str | None) -> list[str]:
  crypted = cnl_info.get("crypted", "")

  if not jnk or not crypted:
- info(f"[Item {idx} | hoster={hoster_name}] Missing 'jk' or 'crypted' → skipping")
+ info(
+ f"[Item {idx} | hoster={hoster_name}] Missing 'jk' or 'crypted' → skipping"
+ )
  continue

  try:
@@ -160,11 +166,15 @@ def calculate_pixel_based_difference(img1, img2):
  return (non_zero * 100) / total_elements


- def solve_captcha(hostname, shared_state, fetch_via_flaresolverr, fetch_via_requests_session):
+ def solve_captcha(
+ hostname, shared_state, fetch_via_flaresolverr, fetch_via_requests_session
+ ):
  # Check if FlareSolverr is available
  if not is_flaresolverr_available(shared_state):
- raise RuntimeError("FlareSolverr is required for CAPTCHA solving but is not configured. "
- "Please configure FlareSolverr in the web UI.")
+ raise RuntimeError(
+ "FlareSolverr is required for CAPTCHA solving but is not configured. "
+ "Please configure FlareSolverr in the web UI."
+ )

  al = shared_state.values["config"]("Hostnames").get(hostname)
  captcha_base = f"https://www.{al}/files/captcha"
@@ -174,7 +184,7 @@ def solve_captcha(hostname, shared_state, fetch_via_flaresolverr, fetch_via_requ
  method="POST",
  target_url=captcha_base,
  post_data={"cID": 0, "rT": 1},
- timeout=30
+ timeout=30,
  )

  try:
@@ -189,9 +199,13 @@ def solve_captcha(hostname, shared_state, fetch_via_flaresolverr, fetch_via_requ
  images = []
  for img_id in image_ids:
  img_url = f"{captcha_base}?cid=0&hash={img_id}"
- r_img = fetch_via_requests_session(shared_state, method="GET", target_url=img_url, timeout=30)
+ r_img = fetch_via_requests_session(
+ shared_state, method="GET", target_url=img_url, timeout=30
+ )
  if r_img.status_code != 200:
- raise RuntimeError(f"Failed to download captcha image {img_id} (HTTP {r_img.status_code})")
+ raise RuntimeError(
+ f"Failed to download captcha image {img_id} (HTTP {r_img.status_code})"
+ )
  elif not r_img.content:
  raise RuntimeError(f"Captcha image {img_id} is empty or invalid.")
  images.append((img_id, r_img.content))
@@ -225,19 +239,22 @@ def solve_captcha(hostname, shared_state, fetch_via_flaresolverr, fetch_via_requ
  total_difference += calculate_pixel_based_difference(img_i, img_j)
  images_pixel_differences.append((img_id_i, total_difference))

- identified_captcha_image, cumulated_percentage = max(images_pixel_differences, key=lambda x: x[1])
- different_pixels_percentage = int(cumulated_percentage / len(images)) if images else int(cumulated_percentage)
- info(f'CAPTCHA image "{identified_captcha_image}" - difference to others: {different_pixels_percentage}%')
+ identified_captcha_image, cumulated_percentage = max(
+ images_pixel_differences, key=lambda x: x[1]
+ )
+ different_pixels_percentage = (
+ int(cumulated_percentage / len(images)) if images else int(cumulated_percentage)
+ )
+ info(
+ f'CAPTCHA image "{identified_captcha_image}" - difference to others: {different_pixels_percentage}%'
+ )

  result = fetch_via_flaresolverr(
  shared_state,
  method="POST",
  target_url=captcha_base,
  post_data={"cID": 0, "pC": identified_captcha_image, "rT": 2},
- timeout=60
+ timeout=60,
  )

- return {
- "response": result["text"],
- "captcha_id": identified_captcha_image
- }
+ return {"response": result["text"], "captcha_id": identified_captcha_image}
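The solver above identifies the odd-one-out CAPTCHA tile by summing pairwise pixel differences and taking the maximum. A rough PIL-based sketch of that idea (illustrative helper names rather than the packaged implementation, and it assumes all tiles share the same dimensions):

    from PIL import Image, ImageChops

    def pixel_difference_percent(img1: Image.Image, img2: Image.Image) -> float:
        # Percentage of RGB channel values that differ between two equally sized images.
        diff = ImageChops.difference(img1.convert("RGB"), img2.convert("RGB"))
        values = [channel for pixel in diff.getdata() for channel in pixel]
        non_zero = sum(1 for channel in values if channel != 0)
        return (non_zero * 100) / len(values)

    def pick_captcha_image(images):
        # images: list of (img_id, PIL.Image); the tile most unlike the rest is the answer.
        scores = []
        for i, (img_id, img) in enumerate(images):
            total = sum(
                pixel_difference_percent(img, other)
                for j, (_, other) in enumerate(images)
                if i != j
            )
            scores.append((img_id, total))
        return max(scores, key=lambda s: s[1])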