weeb-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- weeb_cli/__init__.py +1 -0
- weeb_cli/__main__.py +4 -0
- weeb_cli/commands/downloads.py +126 -0
- weeb_cli/commands/search.py +428 -0
- weeb_cli/commands/settings.py +254 -0
- weeb_cli/commands/setup.py +26 -0
- weeb_cli/commands/watchlist.py +130 -0
- weeb_cli/config.py +50 -0
- weeb_cli/i18n.py +65 -0
- weeb_cli/locales/en.json +168 -0
- weeb_cli/locales/tr.json +168 -0
- weeb_cli/main.py +85 -0
- weeb_cli/providers/__init__.py +21 -0
- weeb_cli/providers/animecix.py +276 -0
- weeb_cli/providers/anizle.py +450 -0
- weeb_cli/providers/base.py +98 -0
- weeb_cli/providers/registry.py +45 -0
- weeb_cli/providers/turkanime.py +499 -0
- weeb_cli/services/__init__.py +0 -0
- weeb_cli/services/dependency_manager.py +321 -0
- weeb_cli/services/details.py +32 -0
- weeb_cli/services/downloader.py +308 -0
- weeb_cli/services/player.py +47 -0
- weeb_cli/services/progress.py +136 -0
- weeb_cli/services/scraper.py +91 -0
- weeb_cli/services/search.py +16 -0
- weeb_cli/services/updater.py +199 -0
- weeb_cli/services/watch.py +19 -0
- weeb_cli/ui/__init__.py +1 -0
- weeb_cli/ui/header.py +30 -0
- weeb_cli/ui/menu.py +59 -0
- weeb_cli/ui/prompt.py +120 -0
- weeb_cli-1.0.0.dist-info/METADATA +148 -0
- weeb_cli-1.0.0.dist-info/RECORD +38 -0
- weeb_cli-1.0.0.dist-info/WHEEL +5 -0
- weeb_cli-1.0.0.dist-info/entry_points.txt +2 -0
- weeb_cli-1.0.0.dist-info/licenses/LICENSE +390 -0
- weeb_cli-1.0.0.dist-info/top_level.txt +1 -0
weeb_cli/providers/animecix.py
@@ -0,0 +1,276 @@
import json
import time
import urllib.request
from urllib.parse import urlparse, parse_qs, quote, urlsplit, urlunsplit
from typing import List, Optional

from weeb_cli.providers.base import (
    BaseProvider,
    AnimeResult,
    AnimeDetails,
    Episode,
    StreamLink
)
from weeb_cli.providers.registry import register_provider

BASE_URL = "https://animecix.tv/"
ALT_URL = "https://mangacix.net/"
VIDEO_PLAYERS = ["tau-video.xyz", "sibnet"]

HEADERS = {
    "Accept": "application/json",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
}
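
# The helpers below wrap urllib for this provider: _http_get percent-encodes the
# URL path so non-ASCII title slugs survive the request, and _get_json swallows
# network/parse errors and returns None so callers can fail soft.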
def _http_get(url: str, timeout: int = 15) -> bytes:
    sp = urlsplit(url)
    safe_path = quote(sp.path, safe="/:%@")
    safe_url = urlunsplit((sp.scheme, sp.netloc, safe_path, sp.query, sp.fragment))

    req = urllib.request.Request(safe_url, headers=HEADERS)
    with urllib.request.urlopen(req, timeout=timeout) as resp:
        return resp.read()


def _get_json(url: str, timeout: int = 15):
    try:
        data = _http_get(url, timeout)
        return json.loads(data)
    except Exception:
        return None

@register_provider("animecix", lang="tr", region="TR")
class AnimeCixProvider(BaseProvider):

    def __init__(self):
        super().__init__()

    def search(self, query: str) -> List[AnimeResult]:
        q = (query or "").strip().replace(" ", "-")
        q_enc = quote(q, safe="-")
        url = f"{BASE_URL}secure/search/{q_enc}?type=&limit=20"

        data = _get_json(url)
        if not data or "results" not in data:
            return []

        results = []
        for item in data["results"]:
            name = item.get("name")
            _id = item.get("id")
            if name and _id:
                results.append(AnimeResult(
                    id=str(_id),
                    title=str(name),
                    type=self._parse_type(item.get("title_type", ""))
                ))

        return results
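
    # get_details and _get_seasons reuse the same "related-videos" endpoint; the
    # fixed videoId=637113 query parameter looks like a placeholder the API
    # accepts for any title (an assumption based on how it is reused below).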
    def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
        try:
            safe_id = int(anime_id)
        except (ValueError, TypeError):
            return None

        url = f"{ALT_URL}secure/related-videos?episode=1&season=1&titleId={safe_id}&videoId=637113"
        data = _get_json(url)

        title_data = None
        if data and "videos" in data:
            videos = data.get("videos") or []
            if videos:
                title_data = videos[0].get("title")

        episodes = self.get_episodes(anime_id)

        if not episodes:
            movie_url = self._get_movie_url(safe_id)
            if movie_url:
                title_name = title_data.get("name", "Film") if title_data else "Film"
                episodes = [Episode(
                    id=movie_url,
                    number=1,
                    title=title_name,
                    url=movie_url
                )]

        if not title_data:
            return AnimeDetails(
                id=anime_id,
                title=anime_id,
                episodes=episodes,
                total_episodes=len(episodes)
            )

        return AnimeDetails(
            id=anime_id,
            title=title_data.get("name", ""),
            description=title_data.get("description"),
            cover=title_data.get("poster"),
            genres=[g.get("name", "") for g in title_data.get("genres", [])],
            year=title_data.get("year"),
            episodes=episodes,
            total_episodes=len(episodes)
        )

    def _get_movie_url(self, title_id: int) -> Optional[str]:
        url = f"{ALT_URL}secure/titles/{title_id}"
        data = _get_json(url)

        if not data or "title" not in data:
            return None

        title = data["title"]
        videos = title.get("videos") or []

        if videos:
            return videos[0].get("url")

        return None

    def get_episodes(self, anime_id: str) -> List[Episode]:
        try:
            safe_id = int(anime_id)
        except (ValueError, TypeError):
            return []

        seasons = self._get_seasons(safe_id)
        if not seasons:
            seasons = [0]

        episodes = []
        seen = set()

        for sidx in seasons:
            url = f"{ALT_URL}secure/related-videos?episode=1&season={sidx+1}&titleId={safe_id}&videoId=637113"
            data = _get_json(url)

            if not data or "videos" not in data:
                continue

            for v in data["videos"]:
                name = v.get("name")
                ep_url = v.get("url")

                if not name or not ep_url:
                    continue
                if name in seen:
                    continue

                seen.add(name)
                ep_num = self._parse_episode_number(name, len(episodes) + 1)

                episodes.append(Episode(
                    id=ep_url,
                    number=ep_num,
                    title=name,
                    season=sidx + 1,
                    url=ep_url
                ))

        return episodes
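
    # get_streams resolves playable links in two hops: it follows the episode's
    # embed URL, extracts the embed id and the "vid" query parameter from the
    # redirect target, then queries the tau-video.xyz API for quality variants.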
    def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
        embed_path = episode_id.lstrip("/")

        if embed_path.startswith("http"):
            full_url = embed_path
        else:
            full_url = f"{BASE_URL}{quote(embed_path, safe='/:?=&')}"

        try:
            req = urllib.request.Request(full_url, headers=HEADERS)
            resp = urllib.request.urlopen(req, timeout=15)
            final_url = resp.geturl()

            time.sleep(1)

            p = urlparse(final_url)
            parts = p.path.strip("/").split("/")

            embed_id = None
            if len(parts) >= 2:
                if parts[0] == "embed":
                    embed_id = parts[1]
                else:
                    embed_id = parts[0]
            elif len(parts) == 1 and parts[0]:
                embed_id = parts[0]

            qs = parse_qs(p.query)
            vid = (qs.get("vid") or [None])[0]

            if not embed_id or not vid:
                return []

            api_url = f"https://{VIDEO_PLAYERS[0]}/api/video/{embed_id}?vid={vid}"
            video_data = _get_json(api_url)

            if not video_data or "urls" not in video_data:
                return []

            streams = []
            for u in video_data["urls"]:
                label = u.get("label")
                url = u.get("url")
                if url:
                    streams.append(StreamLink(
                        url=url,
                        quality=label or "auto",
                        server="tau-video"
                    ))

            return streams

        except Exception:
            return []

    def _get_seasons(self, title_id: int) -> List[int]:
        try:
            safe_id = int(title_id)
        except (ValueError, TypeError):
            return [0]

        url = f"{ALT_URL}secure/related-videos?episode=1&season=1&titleId={safe_id}&videoId=637113"
        data = _get_json(url)

        if not data or "videos" not in data:
            return [0]

        videos = data.get("videos") or []
        if not videos:
            return [0]

        title = (videos[0] or {}).get("title") or {}
        seasons = title.get("seasons") or []

        if seasons:
            return list(range(len(seasons)))
        return [0]

    def _parse_type(self, title_type: str) -> str:
        title_type = (title_type or "").lower()
        if "movie" in title_type or "film" in title_type:
            return "movie"
        if "ova" in title_type:
            return "ova"
        return "series"

    def _parse_episode_number(self, name: str, fallback: int) -> int:
        import re
        patterns = [
            r'(?:bölüm|episode|ep)\s*(\d+)',
            r'(\d+)\.\s*(?:bölüm|episode)',
            r'^(\d+)$'
        ]

        name_lower = name.lower()
        for pattern in patterns:
            match = re.search(pattern, name_lower)
            if match:
                return int(match.group(1))

        return fallback
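
Below is a minimal usage sketch, not part of the published wheel: it assumes AnimeCixProvider can be instantiated directly outside the CLI and relies only on the fields used above (AnimeResult.id, AnimeDetails.episodes, Episode.id, StreamLink.quality/url).

# Hypothetical example -- not shipped in weeb-cli 1.0.0.
from weeb_cli.providers.animecix import AnimeCixProvider

provider = AnimeCixProvider()

results = provider.search("one piece")                # List[AnimeResult]
if results:
    details = provider.get_details(results[0].id)     # AnimeDetails with episodes
    if details and details.episodes:
        episode = details.episodes[0]
        # episode.id holds the embed URL/path that get_streams() expects
        streams = provider.get_streams(details.id, episode.id)
        for s in streams:
            print(s.quality, s.url)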