TonieToolbox 0.2.2__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- TonieToolbox/__init__.py +1 -1
- TonieToolbox/__main__.py +407 -7
- TonieToolbox/dependency_manager.py +95 -5
- TonieToolbox/media_tags.py +637 -0
- TonieToolbox/recursive_processor.py +96 -11
- TonieToolbox/teddycloud.py +580 -0
- {tonietoolbox-0.2.2.dist-info → tonietoolbox-0.3.0.dist-info}/METADATA +158 -5
- {tonietoolbox-0.2.2.dist-info → tonietoolbox-0.3.0.dist-info}/RECORD +12 -10
- {tonietoolbox-0.2.2.dist-info → tonietoolbox-0.3.0.dist-info}/WHEEL +1 -1
- {tonietoolbox-0.2.2.dist-info → tonietoolbox-0.3.0.dist-info}/entry_points.txt +0 -0
- {tonietoolbox-0.2.2.dist-info → tonietoolbox-0.3.0.dist-info}/licenses/LICENSE.md +0 -0
- {tonietoolbox-0.2.2.dist-info → tonietoolbox-0.3.0.dist-info}/top_level.txt +0 -0
TonieToolbox/teddycloud.py (new file)
@@ -0,0 +1,580 @@
#!/usr/bin/python3
"""
TeddyCloud API client for TonieToolbox.
Handles uploading .taf files to a TeddyCloud instance.
"""

import os
import sys
import json
import logging
import urllib.parse
import urllib.request
import urllib.error
import base64
import mimetypes
import ssl
import time
import socket
import glob
from typing import Optional, Dict, Any, Tuple, List

from .logger import get_logger

logger = get_logger('teddycloud')

# Default timeout settings (in seconds)
DEFAULT_CONNECTION_TIMEOUT = 10
DEFAULT_READ_TIMEOUT = 300  # 5 minutes
DEFAULT_MAX_RETRIES = 3
DEFAULT_RETRY_DELAY = 5  # seconds

# Get file paths for any file type (not just audio)
def get_file_paths(input_pattern):
    """
    Get file paths based on a pattern, without filtering by file type.
    This differs from audio_conversion.get_input_files in that it does not filter for audio files.

    Args:
        input_pattern: Input file pattern or direct file path

    Returns:
        list: List of file paths
    """
    logger.debug("Getting file paths for pattern: %s", input_pattern)

    if input_pattern.endswith(".lst"):
        logger.debug("Processing list file: %s", input_pattern)
        list_dir = os.path.dirname(os.path.abspath(input_pattern))
        file_paths = []

        with open(input_pattern, 'r', encoding='utf-8') as file_list:
            for line_num, line in enumerate(file_list, 1):
                fname = line.strip()
                if not fname or fname.startswith('#'):  # Skip empty lines and comments
                    continue

                # Remove any quote characters from the path
                fname = fname.strip('"\'')

                # Check if the path is absolute or has a drive letter (Windows)
                if os.path.isabs(fname) or (len(fname) > 1 and fname[1] == ':'):
                    full_path = fname  # Use as is if it's an absolute path
                    logger.trace("Using absolute path from list: %s", full_path)
                else:
                    full_path = os.path.join(list_dir, fname)
                    logger.trace("Using relative path from list: %s", full_path)

                # Handle directory paths by finding all files in the directory
                if os.path.isdir(full_path):
                    logger.debug("Path is a directory, finding files in: %s", full_path)
                    dir_glob = os.path.join(full_path, "*")
                    dir_files = sorted(glob.glob(dir_glob))
                    if dir_files:
                        file_paths.extend([f for f in dir_files if os.path.isfile(f)])
                        logger.debug("Found %d entries in directory", len(dir_files))
                    else:
                        logger.warning("No files found in directory at line %d: %s", line_num, full_path)
                elif os.path.isfile(full_path):
                    file_paths.append(full_path)
                else:
                    logger.warning("File not found at line %d: %s", line_num, full_path)

        logger.debug("Found %d files in list file", len(file_paths))
        return file_paths
    else:
        # Process as a glob pattern
        logger.debug("Processing glob pattern: %s", input_pattern)
        file_paths = sorted([f for f in glob.glob(input_pattern) if os.path.isfile(f)])

        if not file_paths:
            # Try with an explicit directory if the pattern didn't match;
            # this helps with Windows paths containing backslashes
            dir_name = os.path.dirname(input_pattern)
            file_name = os.path.basename(input_pattern)
            if dir_name:
                alt_pattern = os.path.join(dir_name, file_name)
                file_paths = sorted([f for f in glob.glob(alt_pattern) if os.path.isfile(f)])

        # If still no files, try the literal path (no glob interpretation)
        if not file_paths and os.path.isfile(input_pattern):
            file_paths = [input_pattern]

        logger.debug("Found %d files matching pattern", len(file_paths))
        return file_paths

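For reference, a minimal sketch of how this helper might be called; the paths are placeholders, not from the package:

```python
# Hypothetical usage of get_file_paths; the paths shown are illustrative.
from TonieToolbox.teddycloud import get_file_paths

taf_files = get_file_paths("./output/*.taf")     # glob pattern
listed_files = get_file_paths("./uploads.lst")   # .lst file: one path per line, '#' comments allowed
for path in taf_files + listed_files:
    print(path)
```
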
class ProgressTracker:
    """Helper class to track and display upload progress."""

    def __init__(self, total_size, file_name):
        """
        Initialize progress tracker.

        Args:
            total_size: Total size of the file in bytes
            file_name: Name of the file being uploaded
        """
        self.total_size = total_size
        self.file_name = file_name
        self.uploaded = 0
        self.start_time = time.time()
        self.last_update = 0
        self.last_percent = 0

    def update(self, chunk_size):
        """
        Update progress by the given chunk size.

        Args:
            chunk_size: Size of the chunk that was uploaded
        """
        self.uploaded += chunk_size
        current_time = time.time()

        # Limit updates to max 10 per second to avoid flooding console
        if current_time - self.last_update >= 0.1:
            percent = min(100, int((self.uploaded / self.total_size) * 100))

            # Only update if percentage changed or it's been more than a second
            if percent != self.last_percent or current_time - self.last_update >= 1:
                self.print_progress(percent)
                self.last_update = current_time
                self.last_percent = percent

    def print_progress(self, percent):
        """
        Print progress bar.

        Args:
            percent: Current percentage of upload completed
        """
        bar_length = 30
        filled_length = int(bar_length * percent // 100)
        bar = '█' * filled_length + '-' * (bar_length - filled_length)

        # Calculate speed
        elapsed_time = max(0.1, time.time() - self.start_time)
        speed = self.uploaded / elapsed_time / 1024  # KB/s

        # Format speed based on magnitude
        if speed >= 1024:
            speed_str = f"{speed/1024:.2f} MB/s"
        else:
            speed_str = f"{speed:.2f} KB/s"

        # Format size
        if self.total_size >= 1048576:  # 1 MB
            size_str = f"{self.uploaded/1048576:.2f}/{self.total_size/1048576:.2f} MB"
        else:
            size_str = f"{self.uploaded/1024:.2f}/{self.total_size/1024:.2f} KB"

        # Calculate remaining time
        if percent > 0:
            remaining = (self.total_size - self.uploaded) / (self.uploaded / elapsed_time)
            if remaining > 60:
                remaining_str = f"{int(remaining/60)}m {int(remaining%60)}s"
            else:
                remaining_str = f"{int(remaining)}s"
        else:
            remaining_str = "calculating..."

        # Print progress bar
        sys.stdout.write(f"\r{self.file_name}: |{bar}| {percent}% {size_str} {speed_str} ETA: {remaining_str}")
        sys.stdout.flush()

        if percent >= 100:
            sys.stdout.write("\n")
            sys.stdout.flush()

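As a quick illustration (not part of the package), the tracker can be driven by hand with simulated chunks; the file name and sizes are placeholders:

```python
# Illustrative only: drive ProgressTracker with simulated 8 KiB chunks.
import time

tracker = ProgressTracker(total_size=1024 * 1024, file_name="example.taf")
sent = 0
while sent < tracker.total_size:
    chunk = min(8192, tracker.total_size - sent)
    tracker.update(chunk)   # prints the bar at most ~10 times per second
    sent += chunk
    time.sleep(0.001)       # simulate network latency
```
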
class ProgressTrackerHandler(urllib.request.HTTPHandler):
    """Custom HTTP handler to track upload progress."""

    def __init__(self, tracker=None):
        """
        Initialize handler.

        Args:
            tracker: ProgressTracker instance to use for tracking
        """
        super().__init__()
        self.tracker = tracker

    def http_request(self, req):
        """
        Hook into HTTP request to track upload progress.

        Args:
            req: HTTP request object

        Returns:
            Modified request object
        """
        if self.tracker and req.data:
            req.add_unredirected_header('Content-Length', str(len(req.data)))
            old_data = req.data

            # Replace data with an iterator that tracks progress
            def data_iterator():
                chunk_size = 8192
                total_sent = 0
                data = old_data
                while total_sent < len(data):
                    chunk = data[total_sent:total_sent + chunk_size]
                    total_sent += len(chunk)
                    self.tracker.update(len(chunk))
                    yield chunk

            req.data = data_iterator()

        return req

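To see how the pieces fit together, here is a sketch (mirroring what `upload_file` does internally, below) of installing the handler so uploads report progress; the payload is a placeholder:

```python
# Sketch: wire a ProgressTracker into urllib via the custom handler.
import urllib.request

body = b"\x00" * 4096  # stands in for an already-built multipart request body

tracker = ProgressTracker(total_size=len(body), file_name="example.taf")
handler = ProgressTrackerHandler(tracker=tracker)
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)  # subsequent urlopen() calls use this opener
```
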
class TeddyCloudClient:
    """Client for interacting with the TeddyCloud API."""

    def __init__(self, base_url: str, ignore_ssl_verify: bool = False,
                 connection_timeout: int = DEFAULT_CONNECTION_TIMEOUT,
                 read_timeout: int = DEFAULT_READ_TIMEOUT,
                 max_retries: int = DEFAULT_MAX_RETRIES,
                 retry_delay: int = DEFAULT_RETRY_DELAY):
        """
        Initialize the TeddyCloud client.

        Args:
            base_url: Base URL of the TeddyCloud instance (e.g., https://teddycloud.example.com)
            ignore_ssl_verify: If True, SSL certificate verification is disabled (useful for self-signed certificates)
            connection_timeout: Timeout for establishing a connection, in seconds
            read_timeout: Timeout for reading data from the server, in seconds
            max_retries: Maximum number of retries for failed requests
            retry_delay: Delay between retries, in seconds
        """
        self.base_url = base_url.rstrip('/')
        self.ignore_ssl_verify = ignore_ssl_verify
        self.connection_timeout = connection_timeout
        self.read_timeout = read_timeout
        self.max_retries = max_retries
        self.retry_delay = retry_delay

        # Create an unverified SSL context if verification is disabled
        self.ssl_context = None
        if ignore_ssl_verify:
            logger.warning("SSL certificate verification is disabled. This is insecure!")
            self.ssl_context = ssl._create_unverified_context()

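For instance, a client pointed at a self-hosted instance with a self-signed certificate might be constructed like this (the URL is a placeholder, not from the package); the class continues below with its request helper and API methods:

```python
# Illustrative client setup; the URL is a placeholder.
client = TeddyCloudClient(
    "https://teddycloud.local",   # trailing slashes are stripped automatically
    ignore_ssl_verify=True,       # accept a self-signed certificate (insecure)
    connection_timeout=5,
    max_retries=2,
)
```
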
    def _urlopen(self, req):
        """Helper method to open URLs with optional SSL verification bypass and retry logic."""
        for attempt in range(self.max_retries):
            try:
                if self.ignore_ssl_verify:
                    return urllib.request.urlopen(req, context=self.ssl_context, timeout=self.connection_timeout)
                else:
                    return urllib.request.urlopen(req, timeout=self.connection_timeout)
            except (urllib.error.URLError, socket.timeout) as e:
                logger.warning("Request failed (attempt %d/%d): %s", attempt + 1, self.max_retries, e)
                if attempt < self.max_retries - 1:
                    time.sleep(self.retry_delay)
                else:
                    raise
            except Exception as e:
                logger.error("Unexpected error during request: %s", e)
                raise

    def get_tag_index(self) -> Optional[list]:
        """
        Get list of tags from TeddyCloud.

        Returns:
            List of tags or None if request failed
        """
        try:
            url = f"{self.base_url}/api/getTagIndex"
            headers = {'Content-Type': 'application/json'}

            req = urllib.request.Request(url, headers=headers)

            with self._urlopen(req) as response:
                tags = json.loads(response.read().decode('utf-8'))
                logger.debug("Found %d tags", len(tags))
                return tags

        except urllib.error.HTTPError as e:
            logger.error("Failed to get tags: %s", e)
            return None
        except Exception as e:
            logger.error("Error getting tags: %s", e)
            return None

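A short sketch of calling this method directly, assuming a `client` like the one built above (the response shape is handled more fully by `get_tags_from_teddycloud` at the end of the file):

```python
# Sketch: fetch the tag index and count entries.
index = client.get_tag_index()
if index is None:
    print("request failed")
elif isinstance(index, dict) and 'tags' in index:
    print(f"{len(index['tags'])} tags on the box")  # typical TeddyCloud response shape
else:
    print(index)
```
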
    def upload_file(self, file_path: str, special_folder: str = None, path: str = None, show_progress: bool = True) -> bool:
        """
        Upload a .taf or image file to TeddyCloud.

        Args:
            file_path: Path to the file to upload (.taf, .jpg, .jpeg, .png)
            special_folder: Special folder to upload to (currently only 'library' is supported)
            path: Path where to write the file within the special folder
            show_progress: Whether to show a progress bar during upload

        Returns:
            True if the upload was successful, False otherwise
        """
        try:
            if not os.path.exists(file_path):
                logger.error("File does not exist: %s", file_path)
                return False

            # Check for supported file types
            file_ext = os.path.splitext(file_path.lower())[1]
            is_taf = file_ext == '.taf'
            is_image = file_ext in ['.jpg', '.jpeg', '.png']

            if not (is_taf or is_image):
                logger.error("Unsupported file type %s: %s", file_ext, file_path)
                return False

            # Read the file and prepare it for upload
            file_size = os.path.getsize(file_path)
            logger.info("File size: %.2f MB", file_size / (1024 * 1024))

            with open(file_path, 'rb') as f:
                file_content = f.read()

            filename = os.path.basename(file_path)

            # Build multipart form data
            boundary = '----WebKitFormBoundary7MA4YWxkTrZu0gW'
            headers = {
                'Content-Type': f'multipart/form-data; boundary={boundary}',
                'User-Agent': 'TonieToolbox/1.0'
            }

            # Start the request data with the boundary
            body = []
            body.append(f'--{boundary}'.encode())

            # Set the appropriate content type based on the file extension
            content_type = 'application/octet-stream'
            if is_image:
                if file_ext in ('.jpg', '.jpeg'):
                    content_type = 'image/jpeg'
                elif file_ext == '.png':
                    content_type = 'image/png'

            body.append(f'Content-Disposition: form-data; name="file"; filename="{filename}"'.encode())
            body.append(f'Content-Type: {content_type}'.encode())
            body.append(b'')
            body.append(file_content)
            body.append(f'--{boundary}--'.encode())
            body.append(b'')

            # Join all parts with CRLF
            body = b'\r\n'.join(body)

            # Build the upload URL with query parameters
            if special_folder or path:
                query_params = []

                if special_folder:
                    query_params.append(f"special={urllib.parse.quote(special_folder)}")

                if path:
                    query_params.append(f"path={urllib.parse.quote(path)}")

                query_string = "&".join(query_params)
                upload_url = f"{self.base_url}/api/fileUpload?{query_string}"
                logger.debug("Using endpoint with params: %s", upload_url)
            else:
                # Fall back to the previous endpoint for backward compatibility
                upload_url = f"{self.base_url}/api/v1/audio"
                logger.debug("Using legacy endpoint: %s", upload_url)

            # Set up progress tracking if requested
            if show_progress:
                tracker = ProgressTracker(total_size=len(body), file_name=filename)
                handler = ProgressTrackerHandler(tracker=tracker)
                opener = urllib.request.build_opener(handler)
                urllib.request.install_opener(opener)

            # Try the upload, with retries
            for attempt in range(self.max_retries):
                try:
                    # Create a fresh request for each attempt
                    req = urllib.request.Request(upload_url, data=body, headers=headers, method='POST')

                    # Set the read timeout
                    socket.setdefaulttimeout(self.read_timeout)

                    with self._urlopen(req) as response:
                        result_text = response.read().decode('utf-8')

                        # Try to parse as JSON, but handle plain-text responses too
                        try:
                            result = json.loads(result_text)
                            logger.info("Upload successful: %s", result.get('name', 'Unknown'))
                        except json.JSONDecodeError:
                            logger.info("Upload successful, response: %s", result_text)

                        return True

                except urllib.error.HTTPError as e:
                    logger.error("HTTP error during upload (attempt %d/%d): %s",
                                 attempt + 1, self.max_retries, e)

                    # Try to parse the error response
                    try:
                        error_msg = json.loads(e.read().decode('utf-8'))
                        logger.error("Error details: %s", error_msg)
                    except Exception:
                        pass

                    # Only retry on certain HTTP errors (e.g. 500, 502, 503, 504)
                    if e.code >= 500 and attempt < self.max_retries - 1:
                        time.sleep(self.retry_delay)
                        continue

                    return False

                except (urllib.error.URLError, socket.timeout) as e:
                    # Network errors, timeout errors
                    logger.error("Network error during upload (attempt %d/%d): %s",
                                 attempt + 1, self.max_retries, e)

                    if attempt < self.max_retries - 1:
                        time.sleep(self.retry_delay)
                        continue

                    return False

                except Exception as e:
                    logger.error("Unexpected error during upload: %s", e)
                    return False

            return False

        except Exception as e:
            logger.error("Error preparing file for upload: %s", e)
            return False

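A sketch of uploading a single file into the TeddyCloud library with this method, reusing the `client` from the earlier sketch (the file name and destination path are placeholders):

```python
# Sketch: upload one .taf into the 'library' special folder.
ok = client.upload_file(
    "my_story.taf",              # placeholder file
    special_folder="library",    # only 'library' is supported here
    path="/audiobooks",          # destination inside the library (illustrative)
    show_progress=True,
)
print("uploaded" if ok else "upload failed")
```
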
def upload_to_teddycloud(file_path: str, teddycloud_url: str, ignore_ssl_verify: bool = False,
                         special_folder: str = None, path: str = None, show_progress: bool = True,
                         connection_timeout: int = DEFAULT_CONNECTION_TIMEOUT,
                         read_timeout: int = DEFAULT_READ_TIMEOUT,
                         max_retries: int = DEFAULT_MAX_RETRIES,
                         retry_delay: int = DEFAULT_RETRY_DELAY) -> bool:
    """
    Upload a .taf file to TeddyCloud.

    Args:
        file_path: Path to the .taf file to upload
        teddycloud_url: URL of the TeddyCloud instance
        ignore_ssl_verify: If True, SSL certificate verification is disabled
        special_folder: Special folder to upload to (currently only 'library' is supported)
        path: Path where to write the file within the special folder
        show_progress: Whether to show a progress bar during upload
        connection_timeout: Timeout for establishing a connection, in seconds
        read_timeout: Timeout for reading data from the server, in seconds
        max_retries: Maximum number of retry attempts
        retry_delay: Delay between retry attempts, in seconds

    Returns:
        True if the upload was successful, False otherwise
    """
    logger.info("Uploading %s to TeddyCloud %s", file_path, teddycloud_url)

    if special_folder:
        logger.info("Using special folder: %s", special_folder)

    if path:
        logger.info("Using custom path: %s", path)

    if max_retries > 1:
        logger.info("Will retry up to %d times with %d second delay if the upload fails",
                    max_retries, retry_delay)

    client = TeddyCloudClient(
        teddycloud_url, ignore_ssl_verify,
        connection_timeout=connection_timeout,
        read_timeout=read_timeout,
        max_retries=max_retries,
        retry_delay=retry_delay
    )

    return client.upload_file(file_path, special_folder, path, show_progress)

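Putting the helpers together, a batch upload might look like this sketch (the server URL and glob pattern are placeholders):

```python
# Sketch: upload every .taf matching a pattern to a TeddyCloud instance.
for taf in get_file_paths("./output/*.taf"):
    upload_to_teddycloud(
        taf,
        "https://teddycloud.local",  # placeholder URL
        ignore_ssl_verify=True,
        special_folder="library",
    )
```
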
def get_tags_from_teddycloud(teddycloud_url: str, ignore_ssl_verify: bool = False) -> bool:
    """
    Get and display tags from a TeddyCloud instance.

    Args:
        teddycloud_url: URL of the TeddyCloud instance
        ignore_ssl_verify: If True, SSL certificate verification is disabled

    Returns:
        True if tags were retrieved successfully, False otherwise
    """
    logger.info("Getting tags from TeddyCloud %s", teddycloud_url)

    client = TeddyCloudClient(teddycloud_url, ignore_ssl_verify)
    response = client.get_tag_index()

    if not response:
        logger.error("Failed to retrieve tags from TeddyCloud")
        return False

    # Handle the specific JSON structure returned by the TeddyCloud API
    if isinstance(response, dict) and 'tags' in response:
        tags = response['tags']
        logger.info("Successfully retrieved %d tags from TeddyCloud", len(tags))

        print("\nAvailable Tags from TeddyCloud:")
        print("-" * 60)

        # Sort tags by type and then by UID for better organization
        sorted_tags = sorted(tags, key=lambda x: (x.get('type', ''), x.get('uid', '')))

        for tag in sorted_tags:
            uid = tag.get('uid', 'Unknown UID')
            tag_type = tag.get('type', 'Unknown')
            valid = "✓" if tag.get('valid', False) else "✗"
            series = tag.get('tonieInfo', {}).get('series', '')
            episode = tag.get('tonieInfo', {}).get('episode', '')
            source = tag.get('source', '')

            # Format the header with key information
            print(f"UID: {uid} ({tag_type}) - Valid: {valid}")

            # Show more detailed information
            if series:
                print(f"Series: {series}")
            if episode:
                print(f"Episode: {episode}")
            if source:
                print(f"Source: {source}")

            # Show track information if available
            tracks = tag.get('tonieInfo', {}).get('tracks', [])
            if tracks:
                print("Tracks:")
                for i, track in enumerate(tracks, 1):
                    print(f"  {i}. {track}")

            # Show track duration information
            track_seconds = tag.get('trackSeconds', [])
            if track_seconds and len(track_seconds) > 1:
                total_seconds = track_seconds[-1]
                minutes = total_seconds // 60
                seconds = total_seconds % 60
                print(f"Duration: {minutes}:{seconds:02d} ({len(track_seconds)-1} tracks)")

            print("-" * 60)
    else:
        # Fallback for unexpected formats
        logger.info("Successfully retrieved tag data from TeddyCloud")
        print("\nTag data from TeddyCloud:")
        print("-" * 60)

        # Pretty-print the JSON data (json is already imported at module level)
        print(json.dumps(response, indent=2))

        print("-" * 60)

    return True
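
Finally, listing the tags known to a box reduces to a one-liner (the URL is a placeholder):

```python
# Sketch: print all tags known to a TeddyCloud instance.
get_tags_from_teddycloud("https://teddycloud.local", ignore_ssl_verify=True)
```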