weeb-cli 1.0.0-py3-none-any.whl → 2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- weeb_cli/__init__.py +1 -1
- weeb_cli/commands/downloads.py +339 -8
- weeb_cli/commands/settings.py +127 -1
- weeb_cli/config.py +17 -27
- weeb_cli/locales/en.json +31 -3
- weeb_cli/locales/tr.json +31 -3
- weeb_cli/providers/__init__.py +2 -0
- weeb_cli/providers/allanime.py +252 -0
- weeb_cli/providers/extractors/__init__.py +0 -0
- weeb_cli/providers/extractors/megacloud.py +238 -0
- weeb_cli/providers/hianime.py +294 -0
- weeb_cli/services/database.py +313 -0
- weeb_cli/services/downloader.py +60 -49
- weeb_cli/services/local_library.py +263 -0
- weeb_cli/services/logger.py +55 -0
- weeb_cli/services/notifier.py +41 -0
- weeb_cli/services/progress.py +27 -77
- weeb_cli/ui/header.py +1 -1
- weeb_cli/ui/menu.py +2 -2
- weeb_cli-2.0.0.dist-info/METADATA +199 -0
- weeb_cli-2.0.0.dist-info/RECORD +46 -0
- weeb_cli-1.0.0.dist-info/METADATA +0 -148
- weeb_cli-1.0.0.dist-info/RECORD +0 -38
- {weeb_cli-1.0.0.dist-info → weeb_cli-2.0.0.dist-info}/WHEEL +0 -0
- {weeb_cli-1.0.0.dist-info → weeb_cli-2.0.0.dist-info}/entry_points.txt +0 -0
- {weeb_cli-1.0.0.dist-info → weeb_cli-2.0.0.dist-info}/licenses/LICENSE +0 -0
- {weeb_cli-1.0.0.dist-info → weeb_cli-2.0.0.dist-info}/top_level.txt +0 -0
weeb_cli/locales/tr.json
CHANGED
@@ -48,7 +48,18 @@
     "source_changed": "Kaynak {source} olarak değiştirildi.",
     "no_sources": "Bu dil için kaynak bulunamadı.",
     "toggle_on": "{tool} aktif edildi.",
-    "toggle_off": "{tool} devre dışı bırakıldı."
+    "toggle_off": "{tool} devre dışı bırakıldı.",
+    "external_drives": "Harici Diskler",
+    "add_drive": "Disk Ekle",
+    "enter_drive_path": "Disk yolunu girin (örn. D:\\Anime)",
+    "enter_drive_name": "Disk için takma isim",
+    "drive_not_found": "Belirtilen yol bulunamadı.",
+    "drive_added": "Disk eklendi.",
+    "rename_drive": "İsmi Değiştir",
+    "remove_drive": "Diski Kaldır",
+    "confirm_remove": "Bu diski kaldırmak istediğinize emin misiniz?",
+    "drive_renamed": "Disk ismi değiştirildi.",
+    "drive_removed": "Disk kaldırıldı."
   },
   "setup": {
     "welcome": "Weeb CLI'a Hoş Geldiniz!",
@@ -139,7 +150,7 @@
     "no_streams": "Stream bulunamadı"
   },
   "downloads": {
-    "title": "
+    "title": "İndirmeler",
     "empty": "Henüz hiç indirme yapmamışsınız.",
     "status": "Durum",
     "progress": "İlerleme",
@@ -163,6 +174,23 @@
     "queue_stopped": "Kuyruk durduruldu.",
     "queue_running": "Çalışıyor",
     "pending_count": "{count} indirme bekliyor",
-    "cleared": "Temizlendi."
+    "cleared": "Temizlendi.",
+    "completed_downloads": "Tamamlanan İndirmeler",
+    "active_downloads": "Aktif İndirmeler",
+    "manage_queue": "Kuyruk Yönetimi",
+    "select_anime": "Anime Seçin",
+    "search_library": "Ara",
+    "search_anime": "Anime adı girin",
+    "search_all": "Tum Dizinlerde Ara",
+    "offline": "Cevrimdisi",
+    "reindex": "Yeniden Indexle",
+    "indexing": "Indexleniyor...",
+    "indexed": "{count} anime indexlendi",
+    "no_indexed": "Indexlenmis anime yok. Once bir disk baglayip indexleyin.",
+    "drive_not_connected": "Disk bağlı değil",
+    "connect_drive": "{name} diskini bağlayın",
+    "notification_title": "Weeb CLI",
+    "notification_complete": "{anime} - {episode}. bölüm indirildi",
+    "episode_short": "Blm"
   }
 }
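The new keys use Python-style {placeholder} fields. A minimal sketch of how entries like these are typically resolved, assuming a nested dict loaded from tr.json and a dotted-key helper (the helper name t is hypothetical; weeb_cli's real i18n loader is not part of this diff):

# Hypothetical lookup helper; weeb_cli's actual i18n API is not shown here.
LOCALE = {
    "downloads": {
        "notification_complete": "{anime} - {episode}. bölüm indirildi",
        "pending_count": "{count} indirme bekliyor",
    }
}

def t(dotted_key: str, **kwargs) -> str:
    node = LOCALE
    for part in dotted_key.split("."):
        node = node[part]          # walk "downloads.notification_complete"
    return node.format(**kwargs)   # fill {anime}, {episode}, ...

print(t("downloads.notification_complete", anime="Frieren", episode=5))
# Frieren - 5. bölüm indirildi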
weeb_cli/providers/__init__.py
CHANGED
@@ -10,6 +10,8 @@ from weeb_cli.providers.registry import (
 from weeb_cli.providers import animecix
 from weeb_cli.providers import anizle
 from weeb_cli.providers import turkanime
+from weeb_cli.providers import hianime
+from weeb_cli.providers import allanime

 __all__ = [
     "BaseProvider",
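These two imports are what activate the new providers: as allanime.py shows below, registration happens as an import side effect through the @register_provider decorator. A rough sketch of how a decorator registry of this shape usually works (registry.py itself is not included in this diff, so the internals here are assumptions):

# Hypothetical sketch; only the decorator signature mirrors what this diff shows.
_PROVIDERS: dict = {}

def register_provider(name: str, lang: str = "en", region: str = ""):
    def wrapper(cls):
        # Registration is the side effect; the class itself is returned unchanged.
        _PROVIDERS[name] = {"cls": cls, "lang": lang, "region": region}
        return cls
    return wrapper

def get_provider(name: str):
    return _PROVIDERS[name]["cls"]()

# Merely importing a module whose classes carry @register_provider(...) fills
# _PROVIDERS -- which is why hianime and allanime must be imported above.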
weeb_cli/providers/allanime.py
ADDED
@@ -0,0 +1,252 @@
+import json
+import re
+import urllib.request
+from urllib.parse import quote, urlencode
+from typing import List, Optional
+
+from weeb_cli.providers.base import (
+    BaseProvider,
+    AnimeResult,
+    AnimeDetails,
+    Episode,
+    StreamLink
+)
+from weeb_cli.providers.registry import register_provider
+
+API_URL = "https://api.allanime.day/api"
+REFERER = "https://allmanga.to"
+
+HEADERS = {
+    "Accept": "application/json",
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0",
+    "Referer": REFERER
+}
+
+DECODE_MAP = {
+    '79': 'A', '7a': 'B', '7b': 'C', '7c': 'D', '7d': 'E', '7e': 'F', '7f': 'G',
+    '70': 'H', '71': 'I', '72': 'J', '73': 'K', '74': 'L', '75': 'M', '76': 'N',
+    '77': 'O', '68': 'P', '69': 'Q', '6a': 'R', '6b': 'S', '6c': 'T', '6d': 'U',
+    '6e': 'V', '6f': 'W', '60': 'X', '61': 'Y', '62': 'Z', '59': 'a', '5a': 'b',
+    '5b': 'c', '5c': 'd', '5d': 'e', '5e': 'f', '5f': 'g', '50': 'h', '51': 'i',
+    '52': 'j', '53': 'k', '54': 'l', '55': 'm', '56': 'n', '57': 'o', '48': 'p',
+    '49': 'q', '4a': 'r', '4b': 's', '4c': 't', '4d': 'u', '4e': 'v', '4f': 'w',
+    '40': 'x', '41': 'y', '42': 'z', '08': '0', '09': '1', '0a': '2', '0b': '3',
+    '0c': '4', '0d': '5', '0e': '6', '0f': '7', '00': '8', '01': '9', '15': '-',
+    '16': '.', '67': '_', '46': '~', '02': ':', '17': '/', '07': '?', '1b': '#',
+    '63': '[', '65': ']', '78': '@', '19': '!', '1c': '$', '1e': '&', '10': '(',
+    '11': ')', '12': '*', '13': '+', '14': ',', '03': ';', '05': '=', '1d': '%'
+}
+
+
+def _decode_provider_id(encoded: str) -> str:
+    result = []
+    i = 0
+    while i < len(encoded):
+        pair = encoded[i:i+2]
+        if pair in DECODE_MAP:
+            result.append(DECODE_MAP[pair])
+        i += 2
+    decoded = ''.join(result)
+    return decoded.replace('/clock', '/clock.json')
+
+
+def _http_get(url: str, headers: dict = None, timeout: int = 15) -> bytes:
+    req = urllib.request.Request(url, headers=headers or HEADERS)
+    with urllib.request.urlopen(req, timeout=timeout) as resp:
+        return resp.read()
+
+
+def _get_json(url: str, headers: dict = None, timeout: int = 15):
+    try:
+        data = _http_get(url, headers, timeout)
+        return json.loads(data)
+    except Exception:
+        return None
+
+
+def _graphql_request(query: str, variables: dict) -> dict:
+    params = {
+        'variables': json.dumps(variables),
+        'query': query
+    }
+    url = f"{API_URL}?{urlencode(params)}"
+    return _get_json(url, HEADERS)
+
+
+@register_provider("allanime", lang="en", region="US")
+class AllAnimeProvider(BaseProvider):
+
+    def __init__(self):
+        super().__init__()
+        self.mode = "sub"
+        self.headers = HEADERS.copy()
+
+    def search(self, query: str) -> List[AnimeResult]:
+        q = (query or "").strip()
+        if not q:
+            return []
+
+        gql = '''query($search: SearchInput $limit: Int $page: Int $translationType: VaildTranslationTypeEnumType $countryOrigin: VaildCountryOriginEnumType) {
+            shows(search: $search limit: $limit page: $page translationType: $translationType countryOrigin: $countryOrigin) {
+                edges {
+                    _id
+                    name
+                    availableEpisodes
+                    __typename
+                }
+            }
+        }'''
+
+        variables = {
+            "search": {
+                "allowAdult": False,
+                "allowUnknown": False,
+                "query": q
+            },
+            "limit": 40,
+            "page": 1,
+            "translationType": self.mode,
+            "countryOrigin": "ALL"
+        }
+
+        data = _graphql_request(gql, variables)
+        if not data or 'data' not in data:
+            return []
+
+        shows = data.get('data', {}).get('shows', {}).get('edges', [])
+        results = []
+
+        for show in shows:
+            anime_id = show.get('_id')
+            name = show.get('name')
+            episodes = show.get('availableEpisodes', {})
+
+            if not anime_id or not name:
+                continue
+
+            ep_count = episodes.get(self.mode, 0)
+            if ep_count == 0:
+                continue
+
+            results.append(AnimeResult(
+                id=anime_id,
+                title=f"{name} ({ep_count} episodes)",
+                type="series"
+            ))
+
+        return results
+
+    def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
+        episodes = self.get_episodes(anime_id)
+
+        if not episodes:
+            return None
+
+        title = anime_id.replace('-', ' ').title()
+
+        return AnimeDetails(
+            id=anime_id,
+            title=title,
+            episodes=episodes,
+            total_episodes=len(episodes)
+        )
+
+    def get_episodes(self, anime_id: str) -> List[Episode]:
+        gql = '''query ($showId: String!) {
+            show(_id: $showId) {
+                _id
+                availableEpisodesDetail
+            }
+        }'''
+
+        variables = {"showId": anime_id}
+        data = _graphql_request(gql, variables)
+
+        if not data or 'data' not in data:
+            return []
+
+        show = data.get('data', {}).get('show', {})
+        ep_detail = show.get('availableEpisodesDetail', {})
+        ep_list = ep_detail.get(self.mode, [])
+
+        episodes = []
+        for i, ep_num in enumerate(sorted(ep_list, key=lambda x: float(x) if x.replace('.', '').isdigit() else 0)):
+            episodes.append(Episode(
+                id=f"{anime_id}::ep={ep_num}",
+                number=i + 1,
+                title=f"Episode {ep_num}"
+            ))
+
+        return episodes
+
+    def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
+        if '::ep=' in episode_id:
+            parts = episode_id.split('::ep=')
+            show_id = parts[0]
+            ep_no = parts[1]
+        else:
+            show_id = anime_id
+            ep_no = episode_id
+
+        gql = '''query ($showId: String!, $translationType: VaildTranslationTypeEnumType!, $episodeString: String!) {
+            episode(showId: $showId translationType: $translationType episodeString: $episodeString) {
+                episodeString
+                sourceUrls
+            }
+        }'''
+
+        variables = {
+            "showId": show_id,
+            "translationType": self.mode,
+            "episodeString": ep_no
+        }
+
+        data = _graphql_request(gql, variables)
+        if not data or 'data' not in data:
+            return []
+
+        episode = data.get('data', {}).get('episode', {})
+        source_urls = episode.get('sourceUrls', [])
+
+        streams = []
+
+        for source in source_urls:
+            try:
+                source_url = source.get('sourceUrl', '')
+                source_name = source.get('sourceName', 'unknown')
+
+                if not source_url or not source_url.startswith('--'):
+                    continue
+
+                encoded = source_url[2:]
+                decoded_path = _decode_provider_id(encoded)
+
+                if not decoded_path:
+                    continue
+
+                full_url = f"https://allanime.day{decoded_path}"
+
+                stream_data = _get_json(full_url, self.headers)
+                if not stream_data:
+                    continue
+
+                links = stream_data.get('links', [])
+                for link in links:
+                    link_url = link.get('link')
+                    resolution = link.get('resolutionStr', 'auto')
+
+                    if link_url:
+                        streams.append(StreamLink(
+                            url=link_url,
+                            quality=resolution,
+                            server=source_name.lower(),
+                            headers={"Referer": REFERER}
+                        ))
+            except Exception:
+                continue
+
+        return streams
+
+    def set_mode(self, mode: str):
+        if mode in ['sub', 'dub']:
+            self.mode = mode
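The "--"-prefixed sourceUrl values are obfuscated with a fixed hex-pair substitution; _decode_provider_id walks the string two characters at a time through DECODE_MAP. A round-trip sketch with a fabricated sample path (only DECODE_MAP and _decode_provider_id come from the module above; encode_path is built here just for the demo):

from weeb_cli.providers.allanime import DECODE_MAP, _decode_provider_id

# Invert the table to fabricate an encoded sample, then decode it back.
ENCODE_MAP = {char: pair for pair, char in DECODE_MAP.items()}

def encode_path(path: str) -> str:
    return "--" + "".join(ENCODE_MAP[c] for c in path)

sample = "/apivtwo/clock?id=abc123"
encoded = encode_path(sample)
decoded = _decode_provider_id(encoded[2:])  # callers strip the "--" marker first

# _decode_provider_id also rewrites /clock to /clock.json on the way out.
assert decoded == "/apivtwo/clock.json?id=abc123"
print(encoded, "->", decoded)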
weeb_cli/providers/extractors/__init__.py
ADDED
File without changes
weeb_cli/providers/extractors/megacloud.py
ADDED
@@ -0,0 +1,238 @@
+import re
+import json
+import time
+import urllib.request
+from typing import Optional, Dict, Any, List
+from bs4 import BeautifulSoup
+
+try:
+    from Crypto.Cipher import AES
+    from Crypto.Util.Padding import unpad
+    import base64
+    HAS_CRYPTO = True
+except ImportError:
+    HAS_CRYPTO = False
+
+HIANIME_BASE = "https://hianime.to"
+KEY_URL = "https://raw.githubusercontent.com/ryanwtf88/megacloud-keys/refs/heads/master/key.txt"
+MEGAPLAY_URL = "https://megaplay.buzz/stream/s-2/"
+VIDWISH_URL = "https://vidwish.live/stream/s-2/"
+REFERER = "https://megacloud.tv"
+
+HEADERS = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+}
+
+_cached_key = None
+_key_fetched_at = 0
+KEY_CACHE_DURATION = 3600
+
+
+def _http_get(url: str, headers: dict = None, timeout: int = 15) -> bytes:
+    req = urllib.request.Request(url, headers=headers or HEADERS)
+    with urllib.request.urlopen(req, timeout=timeout) as resp:
+        return resp.read()
+
+
+def _get_json(url: str, headers: dict = None) -> Optional[dict]:
+    try:
+        data = _http_get(url, headers)
+        return json.loads(data)
+    except:
+        return None
+
+
+def _get_html(url: str, headers: dict = None) -> str:
+    try:
+        data = _http_get(url, headers)
+        return data.decode('utf-8')
+    except:
+        return ""
+
+
+def _get_decryption_key() -> Optional[str]:
+    global _cached_key, _key_fetched_at
+
+    now = time.time()
+    if _cached_key and (now - _key_fetched_at) < KEY_CACHE_DURATION:
+        return _cached_key
+
+    try:
+        data = _http_get(KEY_URL)
+        _cached_key = data.decode('utf-8').strip()
+        _key_fetched_at = now
+        return _cached_key
+    except:
+        return _cached_key
+
+
+def _decrypt_aes(encrypted: str, key: str) -> Optional[List[dict]]:
+    if not HAS_CRYPTO:
+        return None
+
+    try:
+        key_bytes = key.encode('utf-8')
+        encrypted_bytes = base64.b64decode(encrypted)
+
+        cipher = AES.new(key_bytes, AES.MODE_ECB)
+        decrypted = unpad(cipher.decrypt(encrypted_bytes), AES.block_size)
+        return json.loads(decrypted.decode('utf-8'))
+    except:
+        pass
+
+    try:
+        key_bytes = bytes.fromhex(key)
+        encrypted_bytes = base64.b64decode(encrypted)
+
+        cipher = AES.new(key_bytes, AES.MODE_ECB)
+        decrypted = unpad(cipher.decrypt(encrypted_bytes), AES.block_size)
+        return json.loads(decrypted.decode('utf-8'))
+    except:
+        return None
+
+
+def _extract_token(url: str) -> Optional[str]:
+    html = _get_html(url, {
+        **HEADERS,
+        "Referer": f"{HIANIME_BASE}/"
+    })
+
+    if not html:
+        return None
+
+    soup = BeautifulSoup(html, 'html.parser')
+
+    meta = soup.select_one('meta[name="_gg_fb"]')
+    if meta and meta.get('content'):
+        return meta['content']
+
+    data_dpi = soup.select_one('[data-dpi]')
+    if data_dpi and data_dpi.get('data-dpi'):
+        return data_dpi['data-dpi']
+
+    for script in soup.select('script[nonce]'):
+        nonce = script.get('nonce')
+        if nonce and len(nonce) >= 10:
+            return nonce
+
+    patterns = [
+        r'window\.\w+\s*=\s*["\']([a-zA-Z0-9_-]{10,})["\']',
+        r'data-k\s*=\s*["\']([a-zA-Z0-9_-]{10,})["\']',
+    ]
+
+    for pattern in patterns:
+        match = re.search(pattern, html)
+        if match:
+            return match.group(1)
+
+    return None
+
+
+def _get_fallback_source(ep_id: str, server_type: str, server_name: str) -> Optional[dict]:
+    providers = [
+        ("megaplay", MEGAPLAY_URL),
+        ("vidwish", VIDWISH_URL)
+    ]
+
+    if server_name.lower() == "hd-2":
+        providers = providers[::-1]
+
+    for name, base_url in providers:
+        try:
+            url = f"{base_url}{ep_id}/{server_type}"
+            html = _get_html(url, {
+                **HEADERS,
+                "Referer": f"https://{name}.{'buzz' if name == 'megaplay' else 'live'}/"
+            })
+
+            if not html:
+                continue
+
+            match = re.search(r'data-id=["\'](\d+)["\']', html)
+            if not match:
+                continue
+
+            real_id = match.group(1)
+            domain = "megaplay.buzz" if name == "megaplay" else "vidwish.live"
+            sources_url = f"https://{domain}/stream/getSources?id={real_id}"
+
+            data = _get_json(sources_url, {
+                **HEADERS,
+                "X-Requested-With": "XMLHttpRequest",
+                "Referer": f"https://{domain}/"
+            })
+
+            if data and data.get('sources', {}).get('file'):
+                return {
+                    "file": data['sources']['file'],
+                    "tracks": data.get('tracks', []),
+                    "server": name
+                }
+        except:
+            continue
+
+    return None
+
+
+def extract_stream(server_id: int, episode_id: str, server_type: str = "sub", server_name: str = "hd-1") -> Optional[dict]:
+    ep_id = episode_id.split('ep=')[-1] if 'ep=' in episode_id else episode_id.split('::')[-1]
+
+    sources_url = f"{HIANIME_BASE}/ajax/v2/episode/sources?id={server_id}"
+    ajax_data = _get_json(sources_url, {**HEADERS, "Referer": HIANIME_BASE})
+
+    if not ajax_data or 'link' not in ajax_data:
+        return _get_fallback_source(ep_id, server_type, server_name)
+
+    embed_link = ajax_data['link']
+
+    match = re.search(r'/([^/?]+)\?', embed_link)
+    source_id = match.group(1) if match else None
+
+    match = re.search(r'^(https?://[^/]+(?:/[^/]+){3})', embed_link)
+    base_url = match.group(1) if match else None
+
+    if not source_id or not base_url:
+        return _get_fallback_source(ep_id, server_type, server_name)
+
+    token_url = f"{base_url}/{source_id}?k=1&autoPlay=0&oa=0&asi=1"
+    token = _extract_token(token_url)
+
+    if not token:
+        return _get_fallback_source(ep_id, server_type, server_name)
+
+    get_sources_url = f"{base_url}/getSources?id={source_id}&_k={token}"
+    sources_data = _get_json(get_sources_url, {
+        **HEADERS,
+        "X-Requested-With": "XMLHttpRequest",
+        "Referer": f"{base_url}/{source_id}"
+    })
+
+    if not sources_data:
+        return _get_fallback_source(ep_id, server_type, server_name)
+
+    encrypted = sources_data.get('sources')
+
+    if isinstance(encrypted, list) and encrypted:
+        return {
+            "file": encrypted[0].get('file'),
+            "tracks": sources_data.get('tracks', []),
+            "intro": sources_data.get('intro'),
+            "outro": sources_data.get('outro'),
+            "server": server_name
+        }
+
+    if isinstance(encrypted, str):
+        key = _get_decryption_key()
+        if key:
+            decrypted = _decrypt_aes(encrypted, key)
+            if decrypted and decrypted[0].get('file'):
+                return {
+                    "file": decrypted[0]['file'],
+                    "tracks": sources_data.get('tracks', []),
+                    "intro": sources_data.get('intro'),
+                    "outro": sources_data.get('outro'),
+                    "server": server_name
+                }
+
+    return _get_fallback_source(ep_id, server_type, server_name)
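The megacloud sources payload is base64-encoded AES-ECB ciphertext with PKCS#7 padding, and _decrypt_aes tries the fetched key first as UTF-8 bytes, then as hex. A self-contained round trip of that scheme with a fabricated key and payload (requires pycryptodome, the same optional dependency the extractor probes for):

import base64
import json
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad, unpad

key = b"0123456789abcdef0123456789abcdef"  # fabricated 32-byte demo key
plaintext = json.dumps([{"file": "https://example.com/master.m3u8"}]).encode()

# Build a payload shaped like what getSources returns in the encrypted case.
encrypted = base64.b64encode(
    AES.new(key, AES.MODE_ECB).encrypt(pad(plaintext, AES.block_size))
)

# Mirror of the UTF-8-key branch in _decrypt_aes:
decrypted = unpad(
    AES.new(key, AES.MODE_ECB).decrypt(base64.b64decode(encrypted)),
    AES.block_size,
)
sources = json.loads(decrypted)
assert sources[0]["file"].endswith("master.m3u8")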