weeb-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- weeb_cli/__init__.py +1 -0
- weeb_cli/__main__.py +4 -0
- weeb_cli/commands/downloads.py +126 -0
- weeb_cli/commands/search.py +428 -0
- weeb_cli/commands/settings.py +254 -0
- weeb_cli/commands/setup.py +26 -0
- weeb_cli/commands/watchlist.py +130 -0
- weeb_cli/config.py +50 -0
- weeb_cli/i18n.py +65 -0
- weeb_cli/locales/en.json +168 -0
- weeb_cli/locales/tr.json +168 -0
- weeb_cli/main.py +85 -0
- weeb_cli/providers/__init__.py +21 -0
- weeb_cli/providers/animecix.py +276 -0
- weeb_cli/providers/anizle.py +450 -0
- weeb_cli/providers/base.py +98 -0
- weeb_cli/providers/registry.py +45 -0
- weeb_cli/providers/turkanime.py +499 -0
- weeb_cli/services/__init__.py +0 -0
- weeb_cli/services/dependency_manager.py +321 -0
- weeb_cli/services/details.py +32 -0
- weeb_cli/services/downloader.py +308 -0
- weeb_cli/services/player.py +47 -0
- weeb_cli/services/progress.py +136 -0
- weeb_cli/services/scraper.py +91 -0
- weeb_cli/services/search.py +16 -0
- weeb_cli/services/updater.py +199 -0
- weeb_cli/services/watch.py +19 -0
- weeb_cli/ui/__init__.py +1 -0
- weeb_cli/ui/header.py +30 -0
- weeb_cli/ui/menu.py +59 -0
- weeb_cli/ui/prompt.py +120 -0
- weeb_cli-1.0.0.dist-info/METADATA +148 -0
- weeb_cli-1.0.0.dist-info/RECORD +38 -0
- weeb_cli-1.0.0.dist-info/WHEEL +5 -0
- weeb_cli-1.0.0.dist-info/entry_points.txt +2 -0
- weeb_cli-1.0.0.dist-info/licenses/LICENSE +390 -0
- weeb_cli-1.0.0.dist-info/top_level.txt +1 -0
weeb_cli/providers/turkanime.py
@@ -0,0 +1,499 @@
import os
import re
import json
from typing import List, Optional, Dict
from hashlib import md5
from base64 import b64decode

from weeb_cli.providers.base import (
    BaseProvider,
    AnimeResult,
    AnimeDetails,
    Episode,
    StreamLink
)
from weeb_cli.providers.registry import register_provider

try:
    from curl_cffi import requests as curl_requests
    HAS_CURL_CFFI = True
except ImportError:
    HAS_CURL_CFFI = False

try:
    from Crypto.Cipher import AES
    HAS_CRYPTO = True
except ImportError:
    HAS_CRYPTO = False

try:
    from appdirs import user_cache_dir
    HAS_APPDIRS = True
except ImportError:
    HAS_APPDIRS = False

BASE_URL = "https://turkanime.tv"
_session = None
_base_url = None
_key_cache = None
_csrf_cache = None

SUPPORTED_PLAYERS = [
    "YADISK", "MAIL", "ALUCARD(BETA)", "PIXELDRAIN", "AMATERASU(BETA)",
    "HDVID", "ODNOKLASSNIKI", "GDRIVE", "MP4UPLOAD", "DAILYMOTION",
    "SIBNET", "VK", "VIDMOLY", "YOURUPLOAD", "SENDVID", "MYVI", "UQLOAD"
]
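
# Shared module-level HTTP session. curl_cffi's Firefox impersonation is
# preferred so TLS-fingerprint checks (e.g. Cloudflare) see a real browser;
# plain `requests` is the fallback. The first GET follows redirects and keeps
# the final URL, apparently to discover the site's current domain.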
def _init_session():
    global _session, _base_url

    if _session is not None:
        return _session

    if HAS_CURL_CFFI:
        _session = curl_requests.Session(impersonate="firefox", allow_redirects=True)
    else:
        import requests
        _session = requests.Session()

    _base_url = BASE_URL

    try:
        res = _session.get(BASE_URL + "/", timeout=30)
        if res.status_code == 200:
            final_url = res.url if hasattr(res, 'url') else BASE_URL
            _base_url = final_url.rstrip('/')
    except Exception:
        _base_url = BASE_URL

    return _session


def _fetch(path: str, headers: Dict[str, str] = None) -> str:
    global _base_url
    session = _init_session()

    if path is None:
        return ""

    if _base_url is None:
        _base_url = BASE_URL

    path = path if path.startswith("/") else "/" + path
    url = _base_url + path

    default_headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:121.0) Gecko/20100101 Firefox/121.0",
        "X-Requested-With": "XMLHttpRequest",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7",
    }
    if headers:
        default_headers.update(headers)

    try:
        response = session.get(url, headers=default_headers, timeout=30)
        return response.text
    except Exception:
        return ""
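
# The AES passphrase is never published; it has to be scraped out of the
# site's obfuscated embed JavaScript. The heuristic below locates the
# obfuscator's string table and keeps its longest literal as the key,
# caching the result on disk so later runs can skip the scrape.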
def _obtain_key() -> bytes:
    global _key_cache

    if _key_cache:
        return _key_cache

    if HAS_APPDIRS:
        try:
            cache_file = os.path.join(user_cache_dir(), "turkanimu_key.cache")
            if os.path.isfile(cache_file):
                with open(cache_file, "r", encoding="utf-8") as f:
                    cached = f.read().strip().encode()
                if cached:
                    _key_cache = cached
                    return _key_cache
        except Exception:
            pass

    try:
        embed_html = _fetch("/embed/#/url/")
        js_files = re.findall(r"/embed/js/embeds\..*?\.js", embed_html)

        if len(js_files) < 2:
            return b""

        js1 = _fetch(js_files[1])
        js1_imports = re.findall("[a-z0-9]{16}", js1)

        if not js1_imports:
            return b""

        j2 = _fetch(f'/embed/js/embeds.{js1_imports[0]}.js')
        if "'decrypt'" not in j2 and len(js1_imports) > 1:
            j2 = _fetch(f'/embed/js/embeds.{js1_imports[1]}.js')

        match = re.search(
            r'function a\d_0x[\w]{1,4}\(\)\{var _0x\w{3,8}=\[(.*?)\];', j2
        )
        if not match:
            return b""

        obfuscate_list = match.group(1)
        _key_cache = max(
            obfuscate_list.split("','"),
            key=lambda i: len(re.sub(r"\\x\d\d", "?", i))
        ).encode()

        if HAS_APPDIRS and _key_cache:
            try:
                cache_dir = user_cache_dir()
                os.makedirs(cache_dir, exist_ok=True)
                cache_file = os.path.join(cache_dir, "turkanimu_key.cache")
                with open(cache_file, "w", encoding="utf-8") as f:
                    f.write(_key_cache.decode("utf-8"))
            except Exception:
                pass

        return _key_cache

    except Exception:
        return b""
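
# Decrypts a CryptoJS-style envelope: base64-wrapped JSON carrying a base64
# "ct" (ciphertext), hex "iv", and hex "s" (salt). The AES key is derived
# from passphrase + salt by chained MD5 (OpenSSL EVP_BytesToKey style),
# then the payload is AES-CBC decrypted and PKCS#7-unpadded.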
def _decrypt_cipher(key: bytes, data: bytes) -> str:
    if not HAS_CRYPTO:
        return ""

    def salted_key(data: bytes, salt: bytes, output: int = 48):
        data += salt
        k = md5(data).digest()
        final_key = k
        while len(final_key) < output:
            k = md5(k + data).digest()
            final_key += k
        return final_key[:output]

    def unpad(data: bytes) -> bytes:
        return data[:-(data[-1] if isinstance(data[-1], int) else ord(data[-1]))]

    try:
        b64 = b64decode(data)
        cipher = json.loads(b64)
        cipher_text = b64decode(cipher["ct"])
        iv = bytes.fromhex(cipher["iv"])
        salt = bytes.fromhex(cipher["s"])

        crypt = AES.new(salted_key(key, salt, output=32), iv=iv, mode=AES.MODE_CBC)
        return unpad(crypt.decrypt(cipher_text)).decode("utf-8")
    except Exception:
        return ""


def _get_real_url(url_cipher: str) -> str:
    if HAS_APPDIRS:
        try:
            cache_file = os.path.join(user_cache_dir(), "turkanimu_key.cache")
            if os.path.isfile(cache_file):
                with open(cache_file, "r", encoding="utf-8") as f:
                    cached_key = f.read().strip().encode()
                plaintext = _decrypt_cipher(cached_key, url_cipher.encode())
                if plaintext:
                    return "https:" + json.loads(plaintext)
        except Exception:
            pass

    key = _obtain_key()
    if not key:
        return ""

    plaintext = _decrypt_cipher(key, url_cipher.encode())
    if not plaintext:
        return ""

    try:
        return "https:" + json.loads(plaintext)
    except Exception:
        return ""
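
# Reverses the string encryption used by the jsjiami.com.v7 obfuscator:
# base64 with a case-swapped alphabet, followed by plain RC4. The two loops
# below are the standard RC4 key schedule and keystream generation.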
def _decrypt_jsjiamiv7(ciphertext: str, key: str) -> str:
    _CUSTOM = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
    _STD = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
    _TRANSLATE = str.maketrans(_CUSTOM, _STD)

    t = ciphertext.translate(_TRANSLATE)
    t += "=" * (-len(t) % 4)

    try:
        data = b64decode(t).decode("utf-8")
    except Exception:
        return ""

    S = list(range(256))
    j = 0
    klen = len(key)

    for i in range(256):
        j = (j + S[i] + ord(key[i % klen])) & 0xff
        S[i], S[j] = S[j], S[i]

    i = j = 0
    out = []
    for ch in data:
        i = (i + 1) & 0xff
        j = (j + S[i]) & 0xff
        S[i], S[j] = S[j], S[i]
        out.append(chr(ord(ch) ^ S[(S[i] + S[j]) & 0xff]))

    return "".join(out)
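
# player.js ships the CSRF token as one of several jsjiami-encrypted string
# literals; each candidate is decrypted and the first one that looks like a
# plausible token (letters, '/' and '+' only) wins.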
def _obtain_csrf() -> Optional[str]:
    global _csrf_cache

    if _csrf_cache:
        return _csrf_cache

    try:
        res = _fetch("/js/player.js")

        key_match = re.findall(r"csrf-token':[^\n\)]+'([^']+)'\)", res, re.IGNORECASE)
        candidates = re.findall(r"'([a-zA-Z\d\+\/]{96,156})',", res)

        if not key_match or not candidates:
            return None

        key = key_match[0]

        for ct in candidates:
            decrypted = _decrypt_jsjiamiv7(ct, key)
            if re.search(r"^[a-zA-Z/\+]+$", decrypted):
                _csrf_cache = decrypted
                return _csrf_cache

        return None
    except Exception:
        return None
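
# Some players hand back a masked turkanime .../player/<mask> URL instead
# of a direct file. Exchanging it for the real source means hitting the
# /sources endpoint with the scraped CSRF token.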
def _unmask_real_url(url_mask: str) -> str:
    if "turkanime" not in url_mask:
        return url_mask

    csrf = _obtain_csrf()
    if not csrf:
        return url_mask

    try:
        mask = url_mask.split("/player/")[1]
        headers = {"Csrf-Token": csrf, "cf_clearance": "dull"}
        res = _fetch(f"/sources/{mask}/false", headers)

        data = json.loads(res)
        url = data["response"]["sources"][-1]["file"]

        if url.startswith("//"):
            url = "https:" + url

        return url
    except Exception:
        return url_mask


@register_provider("turkanime", lang="tr", region="TR")
class TurkAnimeProvider(BaseProvider):

    def __init__(self):
        super().__init__()
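
    # Search pulls the site's full title list (/ajax/tamliste) in one
    # request and filters it client-side against the query.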
    def search(self, query: str) -> List[AnimeResult]:
        html = _fetch("/ajax/tamliste")
        if not html:
            return []

        matches = re.findall(r'/anime/(.*?)".*?animeAdi">(.*?)<', html)

        results = []
        query_lower = query.lower()

        for slug, title in matches:
            title_clean = re.sub(r'<[^>]+>', '', title)
            if query_lower in title_clean.lower() or query_lower in slug.lower():
                results.append(AnimeResult(
                    id=slug,
                    title=title_clean
                ))

        return results[:20]
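
    # The internal numeric id used by the episode-list endpoint only shows
    # up in the cover image path (serilerb/<id>.jpg), so it is scraped from
    # there.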
    def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
        html = _fetch(f'/anime/{anime_id}')
        if not html:
            return None

        title_match = re.findall(r'<title>(.*?)</title>', html)
        title = title_match[0] if title_match else anime_id

        img_match = re.findall(r'twitter.image" content="(.*?)"', html)
        cover = img_match[0] if img_match else None

        anime_id_match = re.findall(r'serilerb/(.*?)\.jpg', html)
        internal_id = anime_id_match[0] if anime_id_match else ""

        description = None
        desc_match = re.search(r'twitter:description"\s+content="([^"]+)"', html)
        if not desc_match:
            desc_match = re.search(r'og:description"\s+content="([^"]+)"', html)
        if desc_match:
            import html as html_module
            description = html_module.unescape(desc_match.group(1)).strip()

        info = {}
        info_table = re.findall(r'<div id="animedetay">(<table.*?</table>)', html, re.DOTALL)
        if info_table:
            raw_m = re.findall(r"<tr>.*?<b>(.*?)</b>.*?width.*?>(.*?)</td>.*?</tr>", info_table[0], re.DOTALL)
            for key, val in raw_m:
                val = re.sub(r"<[^>]*>", "", val).strip()
                info[key] = val

        genres = []
        if "Anime Türü" in info:
            genres = [g.strip() for g in info["Anime Türü"].split(" ") if g.strip()]

        episodes = self._get_episodes_internal(internal_id) if internal_id else []

        return AnimeDetails(
            id=anime_id,
            title=title,
            description=description,
            cover=cover,
            genres=genres,
            status=info.get("Kategori"),
            episodes=episodes,
            total_episodes=len(episodes)
        )

    def get_episodes(self, anime_id: str) -> List[Episode]:
        html = _fetch(f'/anime/{anime_id}')
        if not html:
            return []

        anime_id_match = re.findall(r'serilerb/(.*?)\.jpg', html)
        internal_id = anime_id_match[0] if anime_id_match else ""

        if not internal_id:
            return []

        return self._get_episodes_internal(internal_id)

    def _get_episodes_internal(self, internal_id: str) -> List[Episode]:
        html = _fetch(f'/ajax/bolumler&animeId={internal_id}')
        if not html:
            return []

        matches = re.findall(r'/video/(.*?)\\?".*?title=.*?"(.*?)\\?"', html)

        episodes = []
        for i, (slug, title) in enumerate(matches, 1):
            title_clean = re.sub(r'\\["\']', '', title)
            ep_num = self._parse_episode_number(title_clean, i)
            episodes.append(Episode(
                id=slug,
                number=ep_num,
                title=title_clean
            ))

        return episodes
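
    # An episode page either embeds a single fansub group's players directly
    # or, when several groups subbed it ("birden fazla grup" = "more than
    # one group"), lists the groups and loads each group's players through
    # ajax/videosec.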
    def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
        html = _fetch(f'/video/{episode_id}')
        if not html:
            return []

        streams = []

        if not re.search(r".*birden fazla grup", html):
            fansub_match = re.findall(r"</span> ([^\\<>]*)</button>.*?iframe", html)
            fansub = fansub_match[0] if fansub_match else "Unknown"

            video_matches = re.findall(
                r'/embed/#/url/(.*?)\?status=0".*?</span> ([^ ]*?) ?</button>',
                html
            )
            video_matches += re.findall(
                r'(ajax/videosec&b=[A-Za-z0-9]+&v=.*?)\'.*?</span> ?(.*?)</button',
                html
            )

            for cipher_or_path, player in video_matches:
                stream = self._process_video(cipher_or_path, player, fansub)
                if stream:
                    streams.append(stream)
        else:
            fansub_matches = re.findall(r"(ajax/videosec&.*?)'.*?</span> ?(.*?)</a>", html)

            for path, fansub in fansub_matches:
                src = _fetch(path)

                video_matches = re.findall(
                    r'/embed/#/url/(.*?)\?status=0".*?</span> ([^ ]*?) ?</button>',
                    src
                )
                video_matches += re.findall(
                    r'(ajax/videosec&b=[A-Za-z0-9]+&v=.*?)\'.*?</span> ?(.*?)</button',
                    src
                )

                for cipher_or_path, player in video_matches:
                    stream = self._process_video(cipher_or_path, player, fansub)
                    if stream:
                        streams.append(stream)

        return streams
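
    # A player entry is either an encrypted URL cipher or an ajax path that
    # resolves to one. After decryption, anything still pointing at
    # turkanime is a masked /player/ URL and gets one unmasking attempt.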
    def _process_video(self, cipher_or_path: str, player: str, fansub: str) -> Optional[StreamLink]:
        if player.upper() not in SUPPORTED_PLAYERS:
            return None

        if "/" in cipher_or_path:
            src = _fetch(cipher_or_path)
            cipher_match = re.findall(r'/embed/#/url/(.*?)\?status', src)
            if not cipher_match:
                return None
            cipher = cipher_match[0]
        else:
            cipher = cipher_or_path

        url = _get_real_url(cipher)
        if not url:
            return None

        url = url.replace("uqload.io", "uqload.com")

        if "turkanime" in url:
            url = _unmask_real_url(url)
            if "turkanime" in url:
                return None

        return StreamLink(
            url=url,
            quality="auto",
            server=f"{fansub} - {player}"
        )
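
    # Episode numbers are parsed out of Turkish titles ("5. Bölüm" /
    # "Bölüm 5", i.e. "Episode 5"); the positional index is the fallback.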
    def _parse_episode_number(self, title: str, fallback: int) -> int:
        patterns = [
            r'(\d+)\.\s*[Bb]ölüm',
            r'[Bb]ölüm\s*(\d+)',
            r'[Ee]pisode\s*(\d+)',
            r'^(\d+)$'
        ]

        for pattern in patterns:
            match = re.search(pattern, title)
            if match:
                return int(match.group(1))

        return fallback
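
For reviewers who want to sanity-check the decryption path above, here is a minimal sketch (not part of the package) of the envelope format that _decrypt_cipher consumes: base64-wrapped JSON in the CryptoJS style. make_envelope is a hypothetical helper written for this review and assumes PyCryptodome is installed.

    import json
    import os
    from base64 import b64encode
    from hashlib import md5

    from Crypto.Cipher import AES          # PyCryptodome
    from Crypto.Util.Padding import pad


    def make_envelope(passphrase: bytes, plaintext: bytes) -> bytes:
        # Same chained-MD5 key derivation as the provider's salted_key helper.
        salt = os.urandom(8)
        data = passphrase + salt
        k = md5(data).digest()
        key = k
        while len(key) < 32:
            k = md5(k + data).digest()
            key += k

        iv = os.urandom(16)
        ct = AES.new(key[:32], AES.MODE_CBC, iv=iv).encrypt(pad(plaintext, 16))

        # CryptoJS-style JSON envelope, then base64-wrapped as a whole.
        envelope = json.dumps({"ct": b64encode(ct).decode(),
                               "iv": iv.hex(), "s": salt.hex()})
        return b64encode(envelope.encode())


    # Round trip: feeding the envelope back through the provider's function
    # should recover the plaintext JSON string:
    #   _decrypt_cipher(b"secret", make_envelope(b"secret", b'"//host/video"'))
    #   -> '"//host/video"'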