musicdl 2.1.11__py3-none-any.whl → 2.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- musicdl/__init__.py +5 -5
- musicdl/modules/__init__.py +10 -3
- musicdl/modules/common/__init__.py +2 -0
- musicdl/modules/common/gdstudio.py +204 -0
- musicdl/modules/js/__init__.py +1 -0
- musicdl/modules/js/youtube/__init__.py +2 -0
- musicdl/modules/js/youtube/botguard.js +1 -0
- musicdl/modules/js/youtube/jsinterp.py +902 -0
- musicdl/modules/js/youtube/runner.js +2 -0
- musicdl/modules/sources/__init__.py +41 -10
- musicdl/modules/sources/apple.py +207 -0
- musicdl/modules/sources/base.py +256 -28
- musicdl/modules/sources/bilibili.py +118 -0
- musicdl/modules/sources/buguyy.py +148 -0
- musicdl/modules/sources/fangpi.py +153 -0
- musicdl/modules/sources/fivesing.py +108 -0
- musicdl/modules/sources/gequbao.py +148 -0
- musicdl/modules/sources/jamendo.py +108 -0
- musicdl/modules/sources/joox.py +104 -68
- musicdl/modules/sources/kugou.py +129 -76
- musicdl/modules/sources/kuwo.py +188 -68
- musicdl/modules/sources/lizhi.py +107 -0
- musicdl/modules/sources/migu.py +172 -66
- musicdl/modules/sources/mitu.py +140 -0
- musicdl/modules/sources/mp3juice.py +264 -0
- musicdl/modules/sources/netease.py +163 -115
- musicdl/modules/sources/qianqian.py +125 -77
- musicdl/modules/sources/qq.py +232 -94
- musicdl/modules/sources/tidal.py +342 -0
- musicdl/modules/sources/ximalaya.py +256 -0
- musicdl/modules/sources/yinyuedao.py +144 -0
- musicdl/modules/sources/youtube.py +238 -0
- musicdl/modules/utils/__init__.py +12 -4
- musicdl/modules/utils/appleutils.py +563 -0
- musicdl/modules/utils/data.py +107 -0
- musicdl/modules/utils/logger.py +211 -58
- musicdl/modules/utils/lyric.py +73 -0
- musicdl/modules/utils/misc.py +335 -23
- musicdl/modules/utils/modulebuilder.py +75 -0
- musicdl/modules/utils/neteaseutils.py +81 -0
- musicdl/modules/utils/qqutils.py +184 -0
- musicdl/modules/utils/quarkparser.py +105 -0
- musicdl/modules/utils/songinfoutils.py +54 -0
- musicdl/modules/utils/tidalutils.py +738 -0
- musicdl/modules/utils/youtubeutils.py +3606 -0
- musicdl/musicdl.py +184 -86
- musicdl-2.7.3.dist-info/LICENSE +203 -0
- musicdl-2.7.3.dist-info/METADATA +704 -0
- musicdl-2.7.3.dist-info/RECORD +53 -0
- {musicdl-2.1.11.dist-info → musicdl-2.7.3.dist-info}/WHEEL +5 -5
- musicdl-2.7.3.dist-info/entry_points.txt +2 -0
- musicdl/modules/sources/baiduFlac.py +0 -69
- musicdl/modules/sources/xiami.py +0 -104
- musicdl/modules/utils/downloader.py +0 -80
- musicdl-2.1.11.dist-info/LICENSE +0 -22
- musicdl-2.1.11.dist-info/METADATA +0 -82
- musicdl-2.1.11.dist-info/RECORD +0 -24
- {musicdl-2.1.11.dist-info → musicdl-2.7.3.dist-info}/top_level.txt +0 -0
- {musicdl-2.1.11.dist-info → musicdl-2.7.3.dist-info}/zip-safe +0 -0
|
@@ -0,0 +1,342 @@
|
|
|
1
|
+
'''
|
|
2
|
+
Function:
|
|
3
|
+
Implementation of TIDALMusicClient: https://tidal.com/
|
|
4
|
+
Author:
|
|
5
|
+
Zhenchao Jin
|
|
6
|
+
WeChat Official Account (微信公众号):
|
|
7
|
+
Charles的皮卡丘
|
|
8
|
+
'''
|
|
9
|
+
import os
|
|
10
|
+
import re
|
|
11
|
+
import copy
|
|
12
|
+
import aigpy
|
|
13
|
+
import base64
|
|
14
|
+
import tempfile
|
|
15
|
+
import json_repair
|
|
16
|
+
from xml.etree import ElementTree
|
|
17
|
+
from .base import BaseMusicClient
|
|
18
|
+
from rich.progress import Progress
|
|
19
|
+
from urllib.parse import urlencode, urljoin
|
|
20
|
+
from ..utils import legalizestring, resp2json, seconds2hms, touchdir, replacefile, usesearchheaderscookies, usedownloadheaderscookies, SongInfo, SongInfoUtils
|
|
21
|
+
from ..utils.tidalutils import (
|
|
22
|
+
TIDALTvSession, SearchResult, StreamRespond, StreamUrl, Manifest, Period, AdaptationSet, Representation, SegmentTemplate, SegmentList, SegmentTimelineEntry, Track,
|
|
23
|
+
decryptfile, decryptsecuritytoken, pyavready, ffmpegready, remuxflacstream, setmetadata
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
'''TIDALMusicClient'''
class TIDALMusicClient(BaseMusicClient):
    """Music client for TIDAL (https://tidal.com/).

    Authenticates with a TV-style OAuth session (cached between runs), searches
    the v1 API, converts BTS/DASH playback manifests into stream URLs, and
    downloads tracks (decrypting and remuxing FLAC-in-MP4 streams when a
    ffmpeg/pyav backend is available).
    """
    source = 'TIDALMusicClient'
    def __init__(self, **kwargs):
        super(TIDALMusicClient, self).__init__(**kwargs)
        # prefer cached credentials; fall back to a fresh device authorization
        self.tidal_session = TIDALTvSession(headers={}, cookies=self.default_cookies)
        try:
            self.tidal_session.loadfromcache()
            self.tidal_session.refresh()
        except Exception:
            self.tidal_session.auth()
            self.tidal_session.cache()
        self._setauthheaders()
        self._initsession()
    '''_setauthheaders'''
    def _setauthheaders(self):
        # search and download both use the bearer headers of the authenticated session
        self.default_search_headers = self.tidal_session.auth_headers
        self.default_download_headers = self.tidal_session.auth_headers
        self.default_headers = self.default_search_headers
    '''_saferequestget'''
    def _saferequestget(self, url, **kwargs):
        """GET `url`, transparently refreshing the OAuth token once on 401/403."""
        resp = self.get(url, **kwargs)
        if resp.status_code in [401, 403]:
            self.tidal_session.refresh()
            self._setauthheaders()
            self._initsession()
            resp = self.get(url, **kwargs)
        return resp
    '''_parsedashmanifest'''
    def _parsedashmanifest(self, xml):
        """Parse a DASH MPD document (str or bytes) into a Manifest tree."""
        # getbaseurl: resolve an element's <BaseURL> child against the inherited base
        def _getbaseurl(element: ElementTree.Element, inherited: str):
            base_url = inherited
            base_el = element.find('BaseURL')
            if base_el is not None and base_el.text:
                candidate = base_el.text.strip()
                if candidate:
                    base_url = urljoin(inherited, candidate)
            return base_url
        # _parsesegmenttemplate
        def _parsesegmenttemplate(element: ElementTree.Element):
            template = SegmentTemplate(
                media=element.get('media'), initialization=element.get('initialization'), start_number=int(element.get('startNumber') or 1),
                timescale=int(element.get('timescale') or 1), presentation_time_offset=int(element.get('presentationTimeOffset') or 0),
            )
            timeline_el = element.find('SegmentTimeline')
            if timeline_el is not None:
                for s_el in timeline_el.findall('S'):
                    duration = int(s_el.get('d'))
                    repeat = int(s_el.get('r') or 0)
                    start_time = int(s_el.get('t')) if s_el.get('t') else None
                    template.timeline.append(SegmentTimelineEntry(start_time=start_time, duration=duration, repeat=repeat))
            return template
        # _parsesegmentlist
        def _parsesegmentlist(element: ElementTree.Element):
            init_el = element.find('Initialization')
            initialization = init_el.get('sourceURL') if init_el is not None else None
            media_segments = []
            for seg_el in element.findall('SegmentURL'):
                media = seg_el.get('media')
                if media: media_segments.append(media)
            return SegmentList(initialization=initialization, media_segments=media_segments)
        # _parserepresentation
        def _parserepresentation(element: ElementTree.Element, parent_base: str):
            base_url = _getbaseurl(element, parent_base)
            template = element.find('SegmentTemplate')
            seg_template = _parsesegmenttemplate(template) if template is not None else None
            seg_list_el = element.find('SegmentList')
            seg_list = _parsesegmentlist(seg_list_el) if seg_list_el is not None else None
            return Representation(
                id=element.get('id'), bandwidth=element.get('bandwidth'), codec=element.get('codecs'), base_url=base_url, segment_template=seg_template,
                segment_list=seg_list,
            )
        # _parseadaptation
        def _parseadaptation(element: ElementTree.Element, parent_base: str):
            base_url = _getbaseurl(element, parent_base)
            adaptation = AdaptationSet(content_type=element.get('contentType'), base_url=base_url)
            for rep_el in element.findall('Representation'):
                adaptation.representations.append(_parserepresentation(rep_el, base_url))
            return adaptation
        # _parseperiod
        def _parseperiod(element: ElementTree.Element, parent_base: str):
            base_url = _getbaseurl(element, parent_base)
            period = Period(base_url=base_url)
            for adaptation_el in element.findall('AdaptationSet'):
                period.adaptation_sets.append(_parseadaptation(adaptation_el, base_url))
            return period
        # convert to string text
        if isinstance(xml, bytes):
            xml_text = xml.decode("utf-8")
        else:
            xml_text = str(xml)
        # strip the default xmlns once so plain tag names match in find/findall
        xml_text = re.sub(r'xmlns="[^"]+"', '', xml_text, count=1)
        root = ElementTree.fromstring(xml_text)
        manifest_base = _getbaseurl(root, '')
        manifest = Manifest(base_url=manifest_base)
        for period_el in root.findall('Period'):
            manifest.periods.append(_parseperiod(period_el, manifest_base))
        # return
        return manifest
    '''_parsempd'''
    def _parsempd(self, xml: bytes):
        """Parse an MPD; return the Manifest only if it contains at least one
        audio representation with segments, else None."""
        manifest = self._parsedashmanifest(xml)
        for period in manifest.periods:
            for adaptation in period.adaptation_sets:
                if adaptation.content_type == 'audio':
                    for representation in adaptation.representations:
                        if representation.segments:
                            return manifest
        return None
    '''_parsemanifest'''
    def _parsemanifest(self, stream_resp: StreamRespond):
        """Convert a playbackinfo response into a StreamUrl, or None when the
        manifest type is unsupported / contains no audio."""
        # vnd.tidal.bt: base64-encoded JSON carrying direct urls (possibly encrypted)
        if "vnd.tidal.bt" in stream_resp.manifestMimeType:
            manifest = json_repair.loads(base64.b64decode(stream_resp.manifest).decode('utf-8'))
            stream_url = StreamUrl()
            stream_url.trackid = stream_resp.trackid
            stream_url.soundQuality = stream_resp.audioQuality
            stream_url.codec = manifest['codecs']
            stream_url.encryptionKey = manifest['keyId'] if 'keyId' in manifest else ""
            stream_url.url = manifest['urls'][0]
            stream_url.urls = [stream_url.url]
            return stream_url
        # dash+xml: base64-encoded MPD document
        elif "dash+xml" in stream_resp.manifestMimeType:
            xml_bytes = base64.b64decode(stream_resp.manifest)
            manifest = self._parsempd(xml_bytes)
            if not manifest: return
            stream_url = StreamUrl()
            stream_url.trackid = stream_resp.trackid
            stream_url.soundQuality = stream_resp.audioQuality
            audio_reps = []
            for period in manifest.periods:
                for adaptation in period.adaptation_sets:
                    if adaptation.content_type == 'audio':
                        audio_reps.extend(adaptation.representations)
            if not audio_reps: return
            # prefer a representation that actually resolved to media segments
            representation = next((rep for rep in audio_reps if rep.segments), audio_reps[0])
            codec = (representation.codec or '').upper()
            if codec.startswith('MP4A'): codec = 'AAC'
            stream_url.codec = codec
            stream_url.encryptionKey = ""
            stream_url.urls = representation.segments
            if len(stream_url.urls) > 0:
                stream_url.url = stream_url.urls[0]
            return stream_url
        return None
    '''_guessextension'''
    def _guessextension(self, stream_url: StreamUrl):
        """Guess the final on-disk extension from the stream url and codec."""
        url = (stream_url.url or '').lower()
        codec = (stream_url.codec or '').lower()
        if '.flac' in url: return '.flac'
        if '.mp4' in url:
            # ac4/mha1 must stay in an mp4 container; flac-in-mp4 is remuxed to .flac later
            if 'ac4' in codec or 'mha1' in codec: return '.mp4'
            elif 'flac' in codec: return '.flac'
            return '.m4a'
        return '.m4a'
    '''_guessstreamextension'''
    def _guessstreamextension(self, stream_url: StreamUrl):
        """Guess the extension of the raw downloaded stream (before any remux)."""
        candidates = []
        if stream_url.url: candidates.append(stream_url.url)
        if stream_url.urls: candidates.extend(stream_url.urls)
        for candidate in candidates:
            if not candidate: continue
            lowered: str = candidate.split("?")[0].lower()
            for ext in (".flac", ".mp4", ".m4a", ".m4b", ".mp3", ".ogg", ".aac"):
                if lowered.endswith(ext): return ext
        # no extension in any url: fall back to the codec
        codec = (stream_url.codec or "").lower()
        if "flac" in codec:
            return ".flac"
        if "mp4" in codec or "m4a" in codec or "aac" in codec:
            return ".m4a"
        return ".m4a"
    '''_constructsearchurls'''
    def _constructsearchurls(self, keyword: str, rule: dict = None, request_overrides: dict = None):
        """Build one search URL per result page for `keyword`."""
        # init
        rule, request_overrides = rule or {}, request_overrides or {}
        # search rules ('includeContributors' is a boolean query flag: literal 'true', not 'truee')
        default_rule = {'countryCode': self.tidal_session.storage.country_code, 'limit': 10, 'offset': 0, 'query': keyword, 'includeContributors': 'true'}
        default_rule.update(rule)
        # construct search urls based on search rules
        base_url = 'https://api.tidal.com/v1/search?'
        search_urls, page_size, count = [], self.search_size_per_page, 0
        while self.search_size_per_source > count:
            page_rule = copy.deepcopy(default_rule)
            page_rule['limit'] = page_size
            page_rule['offset'] = count
            search_urls.append(base_url + urlencode(page_rule))
            count += page_size
        # return
        return search_urls
    '''_download'''
    @usedownloadheaderscookies
    def _download(self, song_info: SongInfo, request_overrides: dict = None, downloaded_song_infos: list = None, progress: Progress = None, song_progress_id: int = 0):
        """Download one track: fetch segments, decrypt if keyed, remux
        FLAC-in-MP4 to .flac when possible, tag metadata, and append the
        finished SongInfo to `downloaded_song_infos` (also returned)."""
        # init (fresh list per call: a `[]` default would leak results across calls)
        request_overrides = request_overrides or {}
        downloaded_song_infos = [] if downloaded_song_infos is None else downloaded_song_infos
        # success
        try:
            touchdir(song_info.work_dir)
            # parse basic information
            stream_url: StreamUrl = song_info.download_url
            download_ext, final_ext = self._guessstreamextension(stream_url=stream_url), song_info.ext
            if (final_ext != ".flac") or (download_ext == ".flac"):
                remux_required = False
            else:
                remux_required = "flac" in (stream_url.codec or "").lower()
            # without an ffmpeg/pyav backend we cannot remux: keep the native container
            if remux_required and (not ffmpegready() and not pyavready()):
                final_ext, remux_required = download_ext, False
            chunk_size = 1048576
            progress.update(song_progress_id, total=1)
            progress.update(song_progress_id, description=f"{self.source}.download >>> {song_info.song_name} (Downloading)")
            # download music file
            with tempfile.TemporaryDirectory(prefix="musicdl-TIDALMusicClient-track-") as tmpdir:
                download_part = os.path.join(
                    tmpdir, f"download{download_ext}.part" if download_ext else "download.part"
                )
                tool = aigpy.download.DownloadTool(download_part, stream_url.urls)
                tool.setUserProgress(None)
                tool.setPartSize(chunk_size)
                check, err = tool.start(showProgress=False)
                # surface the downloader's error message instead of discarding it
                assert check, err
                decrypted_target = os.path.join(
                    tmpdir, f"decrypted{download_ext}" if download_ext else "decrypted"
                )
                if aigpy.string.isNull(stream_url.encryptionKey):
                    replacefile(download_part, decrypted_target)
                    decrypted_path = decrypted_target
                else:
                    key, nonce = decryptsecuritytoken(stream_url.encryptionKey)
                    decryptfile(download_part, decrypted_target, key, nonce)
                    os.remove(download_part)
                    decrypted_path = decrypted_target
                if remux_required:
                    remux_target = os.path.join(tmpdir, "remux.flac")
                    processed_path, _ = remuxflacstream(decrypted_path, remux_target)
                    if processed_path != decrypted_path:
                        if os.path.exists(decrypted_path): os.remove(decrypted_path)
                        decrypted_path = processed_path
                    else:
                        # remux produced no new file: keep the downloaded container
                        final_ext = download_ext
                save_path = song_info.save_path
                replacefile(decrypted_path, save_path)
                setmetadata(track=song_info.raw_data['search'], filepath=save_path, stream=stream_url)
            # update progress
            progress.update(song_progress_id, total=os.path.getsize(save_path))
            progress.advance(song_progress_id, os.path.getsize(save_path))
            progress.update(song_progress_id, description=f"{self.source}.download >>> {song_info.song_name} (Success)")
            downloaded_song_info = copy.deepcopy(song_info)
            downloaded_song_info.ext = final_ext
            downloaded_song_infos.append(SongInfoUtils.fillsongtechinfo(downloaded_song_info, logger_handle=self.logger_handle, disable_print=self.disable_print))
        # failure
        except Exception as err:
            progress.update(song_progress_id, description=f"{self.source}.download >>> {song_info.song_name} (Error: {err})")
        # return
        return downloaded_song_infos
    '''_search'''
    @usesearchheaderscookies
    def _search(self, keyword: str = '', search_url: str = '', request_overrides: dict = None, song_infos: list = [], progress: Progress = None, progress_id: int = 0):
        """Run one search page, resolve stream + lyric for each hit, and append
        valid SongInfo entries to `song_infos` (also returned)."""
        # init
        request_overrides = request_overrides or {}
        # successful
        try:
            # --search results
            resp = self._saferequestget(search_url, **request_overrides)
            resp.raise_for_status()
            search_results: list[Track] = aigpy.model.dictToModel(resp2json(resp=resp), SearchResult()).tracks.items
            for search_result in search_results:
                if search_result.id is None: continue
                song_info = SongInfo(source=self.source)
                # --download results: try qualities best-to-worst until one tests valid
                qualities = [('hi_res_lossless', 'HI_RES_LOSSLESS'), ('high_lossless', 'LOSSLESS'), ('low_320k', 'HIGH'), ('low_96k', 'LOW')]
                for quality in qualities:
                    params = {"playbackmode": "STREAM", "audioquality": quality[1], "assetpresentation": "FULL",}
                    try:
                        # playbackinfo lives on the api host, same as the search endpoint
                        resp = self._saferequestget(f'https://api.tidal.com/v1/tracks/{search_result.id}/playbackinfo', params=params, **request_overrides)
                        resp.raise_for_status()
                        download_result = aigpy.model.dictToModel(resp2json(resp), StreamRespond())
                    except Exception:
                        continue
                    if ("vnd.tidal.bt" not in download_result.manifestMimeType) and ("dash+xml" not in download_result.manifestMimeType): continue
                    try: download_url = self._parsemanifest(stream_resp=download_result)
                    except Exception: continue
                    if not download_url: continue
                    song_info = SongInfo(
                        source=self.source, download_url=download_url, download_url_status=self.audio_link_tester.test(download_url.urls[0], request_overrides),
                        ext=self._guessextension(stream_url=download_url), duration=seconds2hms(search_result.duration),
                        raw_data={'search': search_result, 'download': download_result}, file_size='NULL',
                        song_name=legalizestring(search_result.title, replace_null_string='NULL'),
                        singers=legalizestring(', '.join([singer.name for singer in search_result.artists]), replace_null_string='NULL'),
                        album=legalizestring(search_result.album.title, replace_null_string='NULL'),
                        identifier=search_result.id,
                    )
                    if song_info.with_valid_download_url: break
                if not song_info.with_valid_download_url: continue
                # --lyric results (best-effort: missing lyrics degrade to 'NULL')
                params = {'countryCode': self.tidal_session.storage.country_code, 'include': 'lyrics'}
                try:
                    resp = self._saferequestget(f'https://openapi.tidal.com/v2/tracks/{search_result.id}', params=params, **request_overrides)
                    resp.raise_for_status()
                    lyric_result = resp2json(resp)
                    lyric = lyric_result.get('included', [{}])[0].get('attributes', {}).get('lrcText', 'NULL')
                except Exception:
                    lyric_result, lyric = {}, 'NULL'
                song_info.raw_data['lyric'] = lyric_result
                song_info.lyric = lyric
                # --append to song_infos
                song_infos.append(song_info)
                # --judgement for search_size
                if self.strict_limit_search_size_per_page and len(song_infos) >= self.search_size_per_page: break
            # --update progress
            progress.update(progress_id, description=f"{self.source}.search >>> {search_url} (Success)")
        # failure
        except Exception as err:
            progress.update(progress_id, description=f"{self.source}.search >>> {search_url} (Error: {err})")
        # return
        return song_infos
|
|
@@ -0,0 +1,256 @@
|
|
|
1
|
+
'''
|
|
2
|
+
Function:
|
|
3
|
+
Implementation of XimalayaMusicClient: https://www.ximalaya.com/
|
|
4
|
+
Author:
|
|
5
|
+
Zhenchao Jin
|
|
6
|
+
WeChat Official Account (微信公众号):
|
|
7
|
+
Charles的皮卡丘
|
|
8
|
+
'''
|
|
9
|
+
import re
|
|
10
|
+
import time
|
|
11
|
+
import copy
|
|
12
|
+
import base64
|
|
13
|
+
import binascii
|
|
14
|
+
from Crypto.Cipher import AES
|
|
15
|
+
from .base import BaseMusicClient
|
|
16
|
+
from rich.progress import Progress
|
|
17
|
+
from urllib.parse import urlencode, urlparse
|
|
18
|
+
from ..utils import byte2mb, resp2json, seconds2hms, legalizestring, safeextractfromdict, usesearchheaderscookies, SongInfo
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
'''XimalayaMusicClient'''
|
|
22
|
+
class XimalayaMusicClient(BaseMusicClient):
|
|
23
|
+
source = 'XimalayaMusicClient'
|
|
24
|
+
def __init__(self, **kwargs):
|
|
25
|
+
super(XimalayaMusicClient, self).__init__(**kwargs)
|
|
26
|
+
self.default_search_headers = {
|
|
27
|
+
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
|
|
28
|
+
}
|
|
29
|
+
self.default_download_headers = {
|
|
30
|
+
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
|
|
31
|
+
}
|
|
32
|
+
self.default_headers = self.default_search_headers
|
|
33
|
+
self._initsession()
|
|
34
|
+
'''_decrypturl'''
|
|
35
|
+
def _decrypturl(self, ciphertext: str):
|
|
36
|
+
if not ciphertext: return ciphertext
|
|
37
|
+
key = binascii.unhexlify("aaad3e4fd540b0f79dca95606e72bf93")
|
|
38
|
+
ciphertext = base64.urlsafe_b64decode(ciphertext + "=" * (4 - len(ciphertext) % 4))
|
|
39
|
+
cipher = AES.new(key, AES.MODE_ECB)
|
|
40
|
+
plaintext = cipher.decrypt(ciphertext)
|
|
41
|
+
plaintext = re.sub(r"[^\x20-\x7E]", "", plaintext.decode("utf-8"))
|
|
42
|
+
return plaintext
|
|
43
|
+
'''_validategdstudio'''
|
|
44
|
+
def _validategdstudio(self, request_overrides: dict = None):
|
|
45
|
+
request_overrides = request_overrides or {}
|
|
46
|
+
try:
|
|
47
|
+
resp = self.get('https://music-api.gdstudio.xyz/api.php?types=search&source=ximalaya&name=%E4%B8%89%E5%9B%BD&count=1&pages=1', timeout=10, **request_overrides)
|
|
48
|
+
resp.raise_for_status()
|
|
49
|
+
result = resp2json(resp=resp)
|
|
50
|
+
assert isinstance(result, list) and (len(result) == 1)
|
|
51
|
+
return True
|
|
52
|
+
except:
|
|
53
|
+
return False
|
|
54
|
+
'''_constructsearchurls'''
|
|
55
|
+
def _constructsearchurls(self, keyword: str, rule: dict = None, request_overrides: dict = None):
|
|
56
|
+
# init
|
|
57
|
+
rule, request_overrides = rule or {}, request_overrides or {}
|
|
58
|
+
# if with cookies, use official apis
|
|
59
|
+
if self.default_search_cookies:
|
|
60
|
+
# --search rules
|
|
61
|
+
default_rule = {
|
|
62
|
+
'kw': keyword, 'page': 1, 'spellchecker': 'true', 'condition': 'relation', 'rows': self.search_size_per_page, 'device': 'iPhone',
|
|
63
|
+
'core': 'track', 'fq': '', 'paidFilter': 'false',
|
|
64
|
+
}
|
|
65
|
+
default_rule.update(rule)
|
|
66
|
+
# --construct search urls based on search rules
|
|
67
|
+
base_url = 'https://www.ximalaya.com/revision/search/main?'
|
|
68
|
+
search_urls, page_size, count = [], self.search_size_per_page, 0
|
|
69
|
+
while self.search_size_per_source > count:
|
|
70
|
+
page_rule = copy.deepcopy(default_rule)
|
|
71
|
+
page_rule['rows'] = page_size
|
|
72
|
+
page_rule['page'] = int(count // page_size) + 1
|
|
73
|
+
search_urls.append(base_url + urlencode(page_rule))
|
|
74
|
+
count += page_size
|
|
75
|
+
# if without cookies, use third part apis
|
|
76
|
+
else:
|
|
77
|
+
use_gdstudio = self._validategdstudio(request_overrides=request_overrides)
|
|
78
|
+
if use_gdstudio:
|
|
79
|
+
# --search rules
|
|
80
|
+
default_rule = {'types': 'search', 'source': 'ximalaya', 'name': keyword, 'count': self.search_size_per_page, 'pages': '1'}
|
|
81
|
+
default_rule.update(rule)
|
|
82
|
+
# --construct search urls based on search rules
|
|
83
|
+
base_url = 'https://music-api.gdstudio.xyz/api.php?'
|
|
84
|
+
search_urls, page_size, count = [], self.search_size_per_page, 0
|
|
85
|
+
while self.search_size_per_source > count:
|
|
86
|
+
page_rule = copy.deepcopy(default_rule)
|
|
87
|
+
page_rule['count'] = page_size
|
|
88
|
+
page_rule['pages'] = int(count // page_size) + 1
|
|
89
|
+
search_urls.append(base_url + urlencode(page_rule))
|
|
90
|
+
count += page_size
|
|
91
|
+
else:
|
|
92
|
+
# --search rules
|
|
93
|
+
default_rule = {'msg': keyword, 'n': '', 'num': self.search_size_per_source, 'type': 'json'}
|
|
94
|
+
default_rule.update(rule)
|
|
95
|
+
# --construct search urls based on search rules
|
|
96
|
+
for base_url in ['https://api-v1.cenguigui.cn/api/music/dg_ximalayamusic.php?', 'https://api.cenguigui.cn/api/music/dg_ximalayamusic.php?']:
|
|
97
|
+
page_rule = copy.deepcopy(default_rule)
|
|
98
|
+
page_rule['num'] = self.search_size_per_source
|
|
99
|
+
search_urls = [base_url + urlencode(page_rule)]
|
|
100
|
+
self.search_size_per_page = self.search_size_per_source
|
|
101
|
+
try:
|
|
102
|
+
resp = self.get(search_urls[0], timeout=10, **request_overrides)
|
|
103
|
+
resp.raise_for_status()
|
|
104
|
+
result = resp2json(resp=resp)
|
|
105
|
+
assert isinstance(result, dict) and (len(result['data']) > 0)
|
|
106
|
+
break
|
|
107
|
+
except:
|
|
108
|
+
continue
|
|
109
|
+
# return
|
|
110
|
+
return search_urls
|
|
111
|
+
'''_parsecggapi'''
|
|
112
|
+
def _parsecggapi(self, keyword, search_results, song_infos: list = [], request_overrides: dict = None):
|
|
113
|
+
# init
|
|
114
|
+
request_overrides = request_overrides or {}
|
|
115
|
+
# parse
|
|
116
|
+
for search_result in search_results['data']:
|
|
117
|
+
# --download results
|
|
118
|
+
if (not isinstance(search_result, dict)) or ('trackId' not in search_result) or ('n' not in search_result):
|
|
119
|
+
continue
|
|
120
|
+
song_info = SongInfo(source=self.source)
|
|
121
|
+
params = {'msg': keyword, 'n': search_result['n'], 'num': self.search_size_per_source, 'type': 'json'}
|
|
122
|
+
try:
|
|
123
|
+
try:
|
|
124
|
+
resp = self.get('https://api-v1.cenguigui.cn/api/music/dg_ximalayamusic.php', params=params, timeout=10, **request_overrides)
|
|
125
|
+
resp.raise_for_status()
|
|
126
|
+
except:
|
|
127
|
+
resp = self.get('https://api.cenguigui.cn/api/music/dg_ximalayamusic.php', params=params, timeout=10, **request_overrides)
|
|
128
|
+
resp.raise_for_status()
|
|
129
|
+
download_result = resp2json(resp)
|
|
130
|
+
download_url: str = download_result.get('url', '')
|
|
131
|
+
if not download_url: continue
|
|
132
|
+
ext = download_url.split('.')[-1].split('?')[0]
|
|
133
|
+
song_info = SongInfo(
|
|
134
|
+
source=self.source, download_url=download_url, download_url_status=self.audio_link_tester.test(download_url, request_overrides),
|
|
135
|
+
raw_data={'search': search_result, 'download': {}, 'lyric': {}}, lyric='NULL', duration='-:-:-', file_size='NULL', ext=ext,
|
|
136
|
+
song_name=legalizestring(search_result.get('title', 'NULL'), replace_null_string='NULL'),
|
|
137
|
+
singers=legalizestring(search_result.get('nickname', 'NULL'), replace_null_string='NULL'),
|
|
138
|
+
album=legalizestring(search_result.get('categoryName', 'NULL'), replace_null_string='NULL'),
|
|
139
|
+
identifier=search_result['trackId'],
|
|
140
|
+
)
|
|
141
|
+
except:
|
|
142
|
+
continue
|
|
143
|
+
if not song_info.with_valid_download_url: continue
|
|
144
|
+
song_info.download_url_status['probe_status'] = self.audio_link_tester.probe(song_info.download_url, request_overrides)
|
|
145
|
+
ext, file_size = song_info.download_url_status['probe_status']['ext'], song_info.download_url_status['probe_status']['file_size']
|
|
146
|
+
if file_size and file_size != 'NULL': song_info.file_size = file_size
|
|
147
|
+
if ext and ext != 'NULL': song_info.ext = ext
|
|
148
|
+
# --append to song_infos
|
|
149
|
+
song_infos.append(song_info)
|
|
150
|
+
# --judgement for search_size
|
|
151
|
+
if self.strict_limit_search_size_per_page and len(song_infos) >= self.search_size_per_page: break
|
|
152
|
+
# return
|
|
153
|
+
return song_infos
|
|
154
|
+
'''_parsegdstudioapi'''
|
|
155
|
+
def _parsegdstudioapi(self, search_results, song_infos: list = [], request_overrides: dict = None):
|
|
156
|
+
# init
|
|
157
|
+
request_overrides = request_overrides or {}
|
|
158
|
+
# parse
|
|
159
|
+
for search_result in search_results:
|
|
160
|
+
# --download results
|
|
161
|
+
if (not isinstance(search_result, dict)) or ('id' not in search_result) or ('raw' not in search_result):
|
|
162
|
+
continue
|
|
163
|
+
song_info = SongInfo(source=self.source)
|
|
164
|
+
for quality in ['play_path_64', 'play_path_aacv164', 'play_path_32', 'play_path_aacv224']:
|
|
165
|
+
download_url: str = search_result['raw'].get(quality, '')
|
|
166
|
+
if not download_url: continue
|
|
167
|
+
song_info = SongInfo(
|
|
168
|
+
source=self.source, download_url=download_url, download_url_status=self.audio_link_tester.test(download_url, request_overrides),
|
|
169
|
+
raw_data={'search': search_result, 'download': {}, 'lyric': {}}, lyric='NULL', duration_s=search_result['raw'].get('duration', 0),
|
|
170
|
+
duration=seconds2hms(search_result['raw'].get('duration', 0)), file_size='NULL', ext=download_url.split('.')[-1].split('?')[0],
|
|
171
|
+
song_name=legalizestring(search_result['raw'].get('title', 'NULL'), replace_null_string='NULL'),
|
|
172
|
+
singers=legalizestring(search_result['raw'].get('nickname', 'NULL'), replace_null_string='NULL'),
|
|
173
|
+
album=legalizestring(search_result['raw'].get('album_title', 'NULL'), replace_null_string='NULL'),
|
|
174
|
+
identifier=search_result['id'],
|
|
175
|
+
)
|
|
176
|
+
if song_info.with_valid_download_url: break
|
|
177
|
+
if not song_info.with_valid_download_url: continue
|
|
178
|
+
song_info.download_url_status['probe_status'] = self.audio_link_tester.probe(song_info.download_url, request_overrides)
|
|
179
|
+
ext, file_size = song_info.download_url_status['probe_status']['ext'], song_info.download_url_status['probe_status']['file_size']
|
|
180
|
+
if file_size and file_size != 'NULL': song_info.file_size = file_size
|
|
181
|
+
if ext and ext != 'NULL': song_info.ext = ext
|
|
182
|
+
# --append to song_infos
|
|
183
|
+
song_infos.append(song_info)
|
|
184
|
+
# --judgement for search_size
|
|
185
|
+
if self.strict_limit_search_size_per_page and len(song_infos) >= self.search_size_per_page: break
|
|
186
|
+
# return
|
|
187
|
+
return song_infos
|
|
188
|
+
'''_parseofficialapi'''
def _parseofficialapi(self, search_results, song_infos: list = None, request_overrides: dict = None):
    """Parse search results from Ximalaya's official web API and append playable songs.

    Args:
        search_results: decoded JSON from the search endpoint; tracks are expected
            under ``data.track.docs`` (a KeyError here propagates to the caller,
            which treats the whole page as failed).
        song_infos: accumulator list mutated in place; a fresh list is created
            when ``None`` (avoids the shared-mutable-default pitfall of ``[]``).
        request_overrides: extra keyword arguments forwarded to every HTTP request.

    Returns:
        The (mutated) ``song_infos`` list, for consistency with the sibling parsers.
    """
    # init
    song_infos = [] if song_infos is None else song_infos
    request_overrides = request_overrides or {}
    for search_result in search_results['data']['track']['docs']:
        # --download results: skip malformed entries without a track page URL
        if (not isinstance(search_result, dict)) or ('trackUrl' not in search_result):
            continue
        song_info = SongInfo(source=self.source)
        # the numeric track id is the last path segment of trackUrl
        track_id = search_result.get('trackUrl').strip('/').split('/')[-1]
        # try quality levels from high (2) down to low (0) until a valid URL is found
        for quality in [2, 1, 0]:
            params = {"device": "web", "trackId": track_id, "trackQualityLevel": quality}
            try:
                # the millisecond timestamp in the path acts as a cache buster
                resp = self.get(f"https://www.ximalaya.com/mobile-playpage/track/v3/baseInfo/{int(time.time() * 1000)}", params=params, **request_overrides)
                resp.raise_for_status()
                download_result = resp2json(resp=resp)
                track_info = safeextractfromdict(download_result, ['trackInfo'], {})
                if not track_info: continue
            except Exception:
                # a failure at one quality level is non-fatal: fall through to the next
                continue
            # prefer the largest file (highest bitrate) among the candidate URLs
            for encrypted_url in sorted(safeextractfromdict(track_info, ['playUrlList'], []), key=lambda x: int(x['fileSize']), reverse=True):
                if not isinstance(encrypted_url, dict): continue
                # URLs come obfuscated and must be decrypted before use
                download_url = self._decrypturl(encrypted_url.get('url', ''))
                if not download_url: continue
                song_info = SongInfo(
                    source=self.source, download_url=download_url, download_url_status=self.audio_link_tester.test(download_url, request_overrides),
                    raw_data={'search': search_result, 'download': download_result, 'lyric': {}}, lyric='NULL', duration_s=track_info.get('duration', 0),
                    duration=seconds2hms(track_info.get('duration', 0)), file_size_bytes=encrypted_url.get('fileSize', 0), file_size=byte2mb(encrypted_url.get('fileSize', 0)),
                    ext=download_url.split('.')[-1].split('?')[0], identifier=track_id, song_name=legalizestring(search_result.get('title', 'NULL'), replace_null_string='NULL'),
                    singers=legalizestring(search_result.get('nickname', 'NULL'), replace_null_string='NULL'),
                    album=legalizestring(safeextractfromdict(search_result, ['albumInfo', 'title'], ''), replace_null_string='NULL'),
                )
                if song_info.with_valid_download_url: break
            # stop probing lower qualities once a playable URL was accepted
            if song_info.with_valid_download_url: break
        if not song_info.with_valid_download_url: continue
        # refine ext / file_size with a probe of the chosen URL (keep defaults on 'NULL')
        song_info.download_url_status['probe_status'] = self.audio_link_tester.probe(song_info.download_url, request_overrides)
        ext, file_size = song_info.download_url_status['probe_status']['ext'], song_info.download_url_status['probe_status']['file_size']
        if file_size and file_size != 'NULL': song_info.file_size = file_size
        if ext and ext != 'NULL': song_info.ext = ext
        # --append to song_infos
        song_infos.append(song_info)
        # --judgement for search_size
        if self.strict_limit_search_size_per_page and len(song_infos) >= self.search_size_per_page: break
    # return
    return song_infos
|
|
231
|
+
'''_search'''
@usesearchheaderscookies
def _search(self, keyword: str = '', search_url: str = '', request_overrides: dict = None, song_infos: list = None, progress: Progress = None, progress_id: int = 0):
    """Fetch one search page and dispatch it to the parser matching its API host.

    Args:
        keyword: the search keyword (only needed by the cenguigui parser).
        search_url: fully-built search endpoint URL to request.
        request_overrides: extra keyword arguments forwarded to the HTTP request.
        song_infos: accumulator list mutated in place; a fresh list is created
            when ``None`` (avoids the shared-mutable-default pitfall of ``[]``).
        progress: optional rich Progress instance used for status reporting.
        progress_id: task id within ``progress`` to update.

    Returns:
        The (mutated) ``song_infos`` list. Any request/parse failure is reported
        via the progress bar (when available) rather than raised, so one bad page
        does not abort the whole search fan-out.
    """
    # init
    request_overrides = request_overrides or {}
    song_infos = [] if song_infos is None else song_infos
    # successful
    try:
        # --search results
        resp = self.get(search_url, **request_overrides)
        resp.raise_for_status()
        search_results = resp2json(resp)
        # --parse based on selected API: dispatch on the hostname serving the search
        parsed_search_url = urlparse(search_url)
        if parsed_search_url.hostname in ['music-api.gdstudio.xyz']:
            self._parsegdstudioapi(search_results, song_infos=song_infos, request_overrides=request_overrides)
        elif parsed_search_url.hostname in ['api-v1.cenguigui.cn', 'api.cenguigui.cn']:
            self._parsecggapi(keyword, search_results, song_infos=song_infos, request_overrides=request_overrides)
        else:
            self._parseofficialapi(search_results, song_infos=song_infos, request_overrides=request_overrides)
        # --update progress (guard: progress defaults to None)
        if progress is not None:
            progress.update(progress_id, description=f"{self.source}.search >>> {search_url} (Success)")
    # failure
    except Exception as err:
        # best-effort reporting; without a progress bar the error is silently dropped
        if progress is not None:
            progress.update(progress_id, description=f"{self.source}.search >>> {search_url} (Error: {err})")
    # return
    return song_infos
|