weeb-cli 1.0.0 (weeb_cli-1.0.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- weeb_cli/__init__.py +1 -0
- weeb_cli/__main__.py +4 -0
- weeb_cli/commands/downloads.py +126 -0
- weeb_cli/commands/search.py +428 -0
- weeb_cli/commands/settings.py +254 -0
- weeb_cli/commands/setup.py +26 -0
- weeb_cli/commands/watchlist.py +130 -0
- weeb_cli/config.py +50 -0
- weeb_cli/i18n.py +65 -0
- weeb_cli/locales/en.json +168 -0
- weeb_cli/locales/tr.json +168 -0
- weeb_cli/main.py +85 -0
- weeb_cli/providers/__init__.py +21 -0
- weeb_cli/providers/animecix.py +276 -0
- weeb_cli/providers/anizle.py +450 -0
- weeb_cli/providers/base.py +98 -0
- weeb_cli/providers/registry.py +45 -0
- weeb_cli/providers/turkanime.py +499 -0
- weeb_cli/services/__init__.py +0 -0
- weeb_cli/services/dependency_manager.py +321 -0
- weeb_cli/services/details.py +32 -0
- weeb_cli/services/downloader.py +308 -0
- weeb_cli/services/player.py +47 -0
- weeb_cli/services/progress.py +136 -0
- weeb_cli/services/scraper.py +91 -0
- weeb_cli/services/search.py +16 -0
- weeb_cli/services/updater.py +199 -0
- weeb_cli/services/watch.py +19 -0
- weeb_cli/ui/__init__.py +1 -0
- weeb_cli/ui/header.py +30 -0
- weeb_cli/ui/menu.py +59 -0
- weeb_cli/ui/prompt.py +120 -0
- weeb_cli-1.0.0.dist-info/METADATA +148 -0
- weeb_cli-1.0.0.dist-info/RECORD +38 -0
- weeb_cli-1.0.0.dist-info/WHEEL +5 -0
- weeb_cli-1.0.0.dist-info/entry_points.txt +2 -0
- weeb_cli-1.0.0.dist-info/licenses/LICENSE +390 -0
- weeb_cli-1.0.0.dist-info/top_level.txt +1 -0
weeb_cli/providers/anizle.py
@@ -0,0 +1,450 @@
+import re
+import json
+from typing import List, Optional, Dict, Any
+from difflib import SequenceMatcher
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+from weeb_cli.providers.base import (
+    BaseProvider,
+    AnimeResult,
+    AnimeDetails,
+    Episode,
+    StreamLink
+)
+from weeb_cli.providers.registry import register_provider
+
+try:
+    from curl_cffi import requests as curl_requests
+    HAS_CURL_CFFI = True
+except ImportError:
+    import requests as std_requests
+    HAS_CURL_CFFI = False
+
+BASE_URL = "https://anizm.pro"
+API_BASE_URL = "https://anizle.org"
+ANIME_LIST_URL = f"{BASE_URL}/getAnimeListForSearch"
+PLAYER_BASE_URL = "https://anizmplayer.com"
+
+_anime_database: List[Dict[str, Any]] = []
+_database_loaded: bool = False
+_session = None
+
+DEFAULT_HEADERS = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+    "Accept-Language": "tr-TR,tr;q=0.9,en-US;q=0.8,en;q=0.7",
+}
+
+
+def _get_session():
+    global _session
+    if _session is None:
+        if HAS_CURL_CFFI:
+            _session = curl_requests.Session(impersonate="chrome110")
+        else:
+            _session = std_requests.Session()
+    return _session
+
+
+def _http_get(url: str, headers: Dict = None, timeout: int = 60):
+    session = _get_session()
+    h = {**DEFAULT_HEADERS}
+    if headers:
+        h.update(headers)
+
+    try:
+        return session.get(url, headers=h, timeout=timeout)
+    except Exception:
+        return None
+
+
+def _http_post(url: str, headers: Dict = None, data: Dict = None, timeout: int = 60):
+    session = _get_session()
+    h = {**DEFAULT_HEADERS, "X-Requested-With": "XMLHttpRequest", "Accept": "application/json"}
+    if headers:
+        h.update(headers)
+
+    try:
+        return session.post(url, headers=h, data=data, timeout=timeout)
+    except Exception:
+        return None
+
+
+def _strip_html(text: str) -> str:
+    if not text:
+        return ""
+    text = re.sub(r'<[^>]+>', '', text)
+    text = re.sub(r'&[a-zA-Z]+;', ' ', text)
+    text = re.sub(r'&#\d+;', ' ', text)
+    text = re.sub(r'\s+', ' ', text)
+    return text.strip()
+
+
+def _load_database() -> List[Dict[str, Any]]:
+    global _anime_database, _database_loaded
+
+    if _database_loaded:
+        return _anime_database
+
+    try:
+        response = _http_get(ANIME_LIST_URL, timeout=120)
+        if response and response.status_code == 200:
+            data = response.json()
+            if isinstance(data, list):
+                _anime_database = data
+                _database_loaded = True
+    except Exception:
+        pass
+
+    return _anime_database
+
+
+def _unpack_js(p: str, a: int, c: int, k: List[str]) -> str:
+    def e(c: int, a: int) -> str:
+        first = '' if c < a else e(c // a, a)
+        c = c % a
+        if c > 35:
+            second = chr(c + 29)
+        elif c > 9:
+            second = chr(c + 87)
+        else:
+            second = str(c)
+        return first + second
+
+    d = {}
+    temp_c = c
+    while temp_c:
+        temp_c -= 1
+        key = e(temp_c, a)
+        d[key] = k[temp_c] if temp_c < len(k) and k[temp_c] else key
+
+    def replace_func(match):
+        return d.get(match.group(0), match.group(0))
+
+    return re.sub(r'\b\w+\b', replace_func, p)
+
+
+def _extract_fireplayer_id(player_html: str) -> Optional[str]:
+    eval_match = re.search(
+        r"eval\(function\(p,a,c,k,e,d\)\{.*?\}return p\}\('(.*?)',(\d+),(\d+),'([^']+)'\.split\('\|'\),0,\{\}\)\)",
+        player_html, re.S
+    )
+
+    if eval_match:
+        p = eval_match.group(1)
+        a = int(eval_match.group(2))
+        c = int(eval_match.group(3))
+        k = eval_match.group(4).split('|')
+
+        try:
+            decoded = _unpack_js(p, a, c, k)
+            id_match = re.search(r'FirePlayer\s*\(\s*["\']([a-f0-9]{32})["\']', decoded)
+            if id_match:
+                return id_match.group(1)
+        except Exception:
+            pass
+
+    fp_direct = re.search(r'FirePlayer\s*\(["\']([a-f0-9]{32})["\']', player_html)
+    if fp_direct:
+        return fp_direct.group(1)
+
+    return None
+
+
+@register_provider("anizle", lang="tr", region="TR")
+class AnizleProvider(BaseProvider):
+
+    def __init__(self):
+        super().__init__()
+
+    def search(self, query: str) -> List[AnimeResult]:
+        database = _load_database()
+        if not database:
+            return []
+
+        results = []
+        for anime in database:
+            scores = [
+                self._similarity(query, anime.get("info_title", "")),
+                self._similarity(query, anime.get("info_titleoriginal", "")),
+                self._similarity(query, anime.get("info_titleenglish", "")),
+            ]
+            max_score = max(scores)
+
+            if max_score > 0.3:
+                year_str = anime.get("info_year", "")
+                year = int(year_str) if year_str and str(year_str).isdigit() else None
+
+                results.append((max_score, AnimeResult(
+                    id=anime.get("info_slug", ""),
+                    title=anime.get("info_title", ""),
+                    cover=self._get_poster_url(anime.get("info_poster", "")),
+                    year=year
+                )))
+
+        results.sort(key=lambda x: x[0], reverse=True)
+        return [r[1] for r in results[:20]]
+
+    def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
+        database = _load_database()
+        anime_data = None
+
+        for anime in database:
+            if anime.get("info_slug") == anime_id:
+                anime_data = anime
+                break
+
+        episodes = self.get_episodes(anime_id)
+
+        if not anime_data:
+            return AnimeDetails(
+                id=anime_id,
+                title=anime_id.replace("-", " ").title(),
+                episodes=episodes,
+                total_episodes=len(episodes)
+            )
+
+        categories = []
+        for cat in anime_data.get("categories", []):
+            if isinstance(cat, dict) and "tag_title" in cat:
+                categories.append(cat["tag_title"])
+
+        year_str = anime_data.get("info_year", "")
+        year = int(year_str) if year_str and str(year_str).isdigit() else None
+
+        description = _strip_html(anime_data.get("info_summary", ""))
+
+        return AnimeDetails(
+            id=anime_id,
+            title=anime_data.get("info_title", ""),
+            description=description,
+            cover=self._get_poster_url(anime_data.get("info_poster", "")),
+            genres=categories,
+            year=year,
+            episodes=episodes,
+            total_episodes=len(episodes)
+        )
+
+    def get_episodes(self, anime_id: str) -> List[Episode]:
+        url = f"{BASE_URL}/{anime_id}"
+        response = _http_get(url)
+
+        if not response or response.status_code != 200:
+            return []
+
+        html = response.text
+        episodes = []
+        seen = set()
+
+        pattern1 = r'href="/?([^"]+?-bolum[^"]*)"[^>]*data-order="(\d+)"[^>]*>([^<]+)'
+        matches1 = re.findall(pattern1, html, re.IGNORECASE)
+
+        for ep_slug, order, title in matches1:
+            ep_slug = ep_slug.strip('/').replace('https://anizm.pro/', '').replace('https://anizle.org/', '')
+            try:
+                order_num = int(order)
+                if order_num not in seen:
+                    seen.add(order_num)
+                    episodes.append(Episode(
+                        id=ep_slug,
+                        number=order_num,
+                        title=title.strip()
+                    ))
+            except ValueError:
+                pass
+
+        pattern2 = r'href="/?([^"]+?-(\d+)-bolum[^"]*)"[^>]*>([^<]*)'
+        matches2 = re.findall(pattern2, html, re.IGNORECASE)
+
+        for ep_slug, ep_num, title in matches2:
+            ep_slug = ep_slug.strip('/').replace('https://anizm.pro/', '').replace('https://anizle.org/', '')
+            try:
+                order_num = int(ep_num)
+                if order_num not in seen:
+                    seen.add(order_num)
+                    final_title = title.strip() if title.strip() else f"{ep_num}. Bölüm"
+                    episodes.append(Episode(
+                        id=ep_slug,
+                        number=order_num,
+                        title=final_title
+                    ))
+            except ValueError:
+                pass
+
+        episodes.sort(key=lambda x: x.number)
+        return episodes
+
+    def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
+        translators = self._get_translators(episode_id)
+        if not translators:
+            return []
+
+        all_videos = []
+        for tr in translators:
+            videos = self._get_translator_videos(tr["url"])
+            for v in videos:
+                all_videos.append({
+                    "url": v["url"],
+                    "name": v["name"],
+                    "fansub": tr["name"]
+                })
+
+        if not all_videos:
+            return []
+
+        streams = []
+        with ThreadPoolExecutor(max_workers=4) as executor:
+            futures = {executor.submit(self._process_video, v): v for v in all_videos[:8]}
+            for future in as_completed(futures, timeout=60):
+                try:
+                    result = future.result(timeout=30)
+                    if result:
+                        streams.append(result)
+                except Exception:
+                    pass
+
+        return streams
+
+    def _similarity(self, query: str, text: str) -> float:
+        if not text:
+            return 0.0
+        q = query.lower()
+        t = text.lower()
+        if q == t:
+            return 1.0
+        if q in t:
+            return 0.9
+        return SequenceMatcher(None, q, t).ratio()
+
+    def _get_poster_url(self, poster: str) -> str:
+        if not poster:
+            return ""
+        if poster.startswith("http"):
+            return poster
+        return f"https://anizm.pro/uploads/img/{poster}"
+
+    def _get_translators(self, episode_slug: str) -> List[Dict[str, str]]:
+        clean_slug = episode_slug.lstrip("/")
+        url = f"{API_BASE_URL}/{clean_slug}"
+
+        response = _http_get(url)
+        if not response or response.status_code != 200:
+            return []
+
+        html = response.text
+        translators = []
+        pattern = r'translator="([^"]+)"[^>]*data-fansub-name="([^"]*)"'
+        matches = re.findall(pattern, html)
+
+        seen = set()
+        for tr_url, fansub in matches:
+            if tr_url not in seen:
+                seen.add(tr_url)
+                translators.append({"url": tr_url, "name": fansub or "Fansub"})
+
+        return translators
+
+    def _get_translator_videos(self, translator_url: str) -> List[Dict[str, str]]:
+        response = _http_get(
+            translator_url,
+            headers={
+                "X-Requested-With": "XMLHttpRequest",
+                "Accept": "application/json",
+                "Referer": API_BASE_URL,
+            }
+        )
+
+        if not response or response.status_code != 200:
+            return []
+
+        try:
+            data = response.json()
+            html = data.get("data", "")
+
+            videos = []
+            pattern = r'video="([^"]+)"[^>]*data-video-name="([^"]*)"'
+            matches = re.findall(pattern, html)
+
+            for video_url, video_name in matches:
+                videos.append({"url": video_url, "name": video_name or "Player"})
+
+            if not videos:
+                pattern2 = r'data-video-name="([^"]*)"[^>]*video="([^"]+)"'
+                matches2 = re.findall(pattern2, html)
+                for video_name, video_url in matches2:
+                    videos.append({"url": video_url, "name": video_name or "Player"})
+
+            return videos
+        except Exception:
+            return []
+
+    def _process_video(self, video_info: Dict[str, str]) -> Optional[StreamLink]:
+        try:
+            video_url = video_info["url"]
+            fansub = video_info["fansub"]
+            name = video_info["name"]
+
+            response = _http_get(
+                video_url,
+                headers={
+                    "X-Requested-With": "XMLHttpRequest",
+                    "Accept": "application/json",
+                    "Referer": API_BASE_URL,
+                }
+            )
+
+            if not response or response.status_code != 200:
+                return None
+
+            data = response.json()
+            player_html = data.get("player", "")
+
+            iframe_match = re.search(r'/player/(\d+)', player_html)
+            if not iframe_match:
+                return None
+
+            player_id = iframe_match.group(1)
+
+            player_response = _http_get(
+                f"{API_BASE_URL}/player/{player_id}",
+                headers={"Referer": f"{API_BASE_URL}/"}
+            )
+
+            if not player_response or player_response.status_code != 200:
+                return None
+
+            fireplayer_id = _extract_fireplayer_id(player_response.text)
+            if not fireplayer_id:
+                return None
+
+            video_response = _http_post(
+                f"{PLAYER_BASE_URL}/player/index.php?data={fireplayer_id}&do=getVideo",
+                headers={
+                    "Referer": f"{PLAYER_BASE_URL}/player/{fireplayer_id}",
+                    "Origin": PLAYER_BASE_URL,
+                }
+            )
+
+            if not video_response or video_response.status_code != 200:
+                return None
+
+            video_data = video_response.json()
+
+            if video_data.get("securedLink"):
+                return StreamLink(
+                    url=video_data["securedLink"],
+                    quality="auto",
+                    server=f"{fansub} - {name}"
+                )
+
+            if video_data.get("videoSource"):
+                return StreamLink(
+                    url=video_data["videoSource"],
+                    quality="auto",
+                    server=f"{fansub} - {name}"
+                )
+
+            return None
+
+        except Exception:
+            return None
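
Stream resolution above hinges on unpacking Dean Edwards-style packed JavaScript served by the FirePlayer embed. As a toy illustration of what _unpack_js does (the packed payload and the 32-character hex id below are made-up placeholders, not real player data):

    # Toy demonstration of the P.A.C.K.E.R. decoding used by _extract_fireplayer_id.
    from weeb_cli.providers.anizle import _unpack_js

    packed = "0('1')"  # word tokens are base-"a" indices into the word list k
    words = ["FirePlayer", "d41d8cd98f00b204e9800998ecf8427e"]  # placeholder hex id
    print(_unpack_js(packed, a=10, c=2, k=words))
    # -> FirePlayer('d41d8cd98f00b204e9800998ecf8427e')
    # _extract_fireplayer_id() pulls the 32-character hex id out of decoded text like this;
    # _process_video() then posts it to the player's getVideo endpoint to obtain the stream URL.
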
weeb_cli/providers/base.py
@@ -0,0 +1,98 @@
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import List, Optional, Dict, Any, Tuple
+
+
+class ProviderError(Exception):
+    def __init__(self, code: str, message: str = ""):
+        self.code = code
+        self.message = message
+        super().__init__(f"{code}: {message}")
+
+
+@dataclass
+class AnimeResult:
+    id: str
+    title: str
+    type: str = "series"
+    cover: Optional[str] = None
+    year: Optional[int] = None
+
+
+@dataclass
+class Episode:
+    id: str
+    number: int
+    title: Optional[str] = None
+    season: int = 1
+    url: Optional[str] = None
+
+
+@dataclass
+class StreamLink:
+    url: str
+    quality: str = "auto"
+    server: str = "default"
+    headers: Dict[str, str] = field(default_factory=dict)
+    subtitles: Optional[str] = None
+
+
+@dataclass
+class AnimeDetails:
+    id: str
+    title: str
+    description: Optional[str] = None
+    cover: Optional[str] = None
+    genres: List[str] = field(default_factory=list)
+    year: Optional[int] = None
+    status: Optional[str] = None
+    episodes: List[Episode] = field(default_factory=list)
+    total_episodes: Optional[int] = None
+
+
+class BaseProvider(ABC):
+
+    name: str = "base"
+    lang: str = "tr"
+    region: str = "TR"
+
+    def __init__(self):
+        self.headers = {
+            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
+            'Accept': 'application/json, text/html, */*',
+        }
+
+    @abstractmethod
+    def search(self, query: str) -> List[AnimeResult]:
+        pass
+
+    @abstractmethod
+    def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
+        pass
+
+    @abstractmethod
+    def get_episodes(self, anime_id: str) -> List[Episode]:
+        pass
+
+    @abstractmethod
+    def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
+        pass
+
+    def _request(self, url: str, params: dict = None, json_response: bool = True) -> Any:
+        import requests
+
+        try:
+            response = requests.get(
+                url,
+                headers=self.headers,
+                params=params,
+                timeout=15
+            )
+            response.raise_for_status()
+
+            if json_response:
+                return response.json()
+            return response.text
+
+        except requests.RequestException:
+            return None
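
BaseProvider defines the contract every source module implements, and register_provider (next hunk) attaches concrete classes to the registry. A minimal hypothetical provider is sketched below; the class name and endpoints are illustrative only and are not part of the package:

    # Hypothetical sketch of a provider built on the BaseProvider contract.
    from typing import List, Optional

    from weeb_cli.providers.base import (
        BaseProvider, AnimeResult, AnimeDetails, Episode, StreamLink
    )
    from weeb_cli.providers.registry import register_provider


    @register_provider("exampleprov", lang="tr", region="TR")  # hypothetical provider name
    class ExampleProvider(BaseProvider):

        def search(self, query: str) -> List[AnimeResult]:
            # _request() returns parsed JSON on success or None on any request error.
            data = self._request("https://example.invalid/api/search", params={"q": query})
            if not data:
                return []
            return [AnimeResult(id=item["slug"], title=item["title"]) for item in data]

        def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
            episodes = self.get_episodes(anime_id)
            return AnimeDetails(id=anime_id, title=anime_id, episodes=episodes,
                                total_episodes=len(episodes))

        def get_episodes(self, anime_id: str) -> List[Episode]:
            return [Episode(id=f"{anime_id}-1-bolum", number=1)]

        def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
            return [StreamLink(url="https://example.invalid/stream.m3u8")]
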
weeb_cli/providers/registry.py
@@ -0,0 +1,45 @@
+from typing import Dict, List, Type, Optional
+from weeb_cli.providers.base import BaseProvider
+
+_providers: Dict[str, Type[BaseProvider]] = {}
+_provider_meta: Dict[str, dict] = {}
+
+
+def register_provider(name: str, lang: str = "tr", region: str = "TR"):
+    def decorator(cls: Type[BaseProvider]):
+        cls.name = name
+        cls.lang = lang
+        cls.region = region
+
+        _providers[name] = cls
+        _provider_meta[name] = {
+            "name": name,
+            "lang": lang,
+            "region": region,
+            "class": cls.__name__
+        }
+
+        return cls
+    return decorator
+
+
+def get_provider(name: str) -> Optional[BaseProvider]:
+    if name in _providers:
+        return _providers[name]()
+    return None
+
+
+def get_providers_for_lang(lang: str) -> List[str]:
+    return [
+        name for name, meta in _provider_meta.items()
+        if meta["lang"] == lang
+    ]
+
+
+def list_providers() -> List[dict]:
+    return list(_provider_meta.values())
+
+
+def get_default_provider(lang: str) -> Optional[str]:
+    providers = get_providers_for_lang(lang)
+    return providers[0] if providers else None
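
Registration happens as a side effect of importing a provider module, so a consumer only needs the registry lookups. A minimal usage sketch, assuming the provider modules have been imported and the upstream site is reachable:

    # Resolve a registered provider and walk search -> episodes -> streams.
    import weeb_cli.providers.anizle  # noqa: F401  importing the module registers "anizle"
    from weeb_cli.providers.registry import get_provider, list_providers

    print(list_providers())  # e.g. [{'name': 'anizle', 'lang': 'tr', 'region': 'TR', 'class': 'AnizleProvider'}]

    provider = get_provider("anizle")          # instantiates AnizleProvider
    results = provider.search("naruto")        # fuzzy-matched against the cached title list
    if results:
        episodes = provider.get_episodes(results[0].id)
        if episodes:
            for stream in provider.get_streams(results[0].id, episodes[0].id):
                print(stream.server, stream.quality, stream.url)
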