karaoke-gen 0.57.0__py3-none-any.whl → 0.71.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- karaoke_gen/audio_fetcher.py +461 -0
- karaoke_gen/audio_processor.py +407 -30
- karaoke_gen/config.py +62 -113
- karaoke_gen/file_handler.py +32 -59
- karaoke_gen/karaoke_finalise/karaoke_finalise.py +148 -67
- karaoke_gen/karaoke_gen.py +270 -61
- karaoke_gen/lyrics_processor.py +13 -1
- karaoke_gen/metadata.py +78 -73
- karaoke_gen/pipeline/__init__.py +87 -0
- karaoke_gen/pipeline/base.py +215 -0
- karaoke_gen/pipeline/context.py +230 -0
- karaoke_gen/pipeline/executors/__init__.py +21 -0
- karaoke_gen/pipeline/executors/local.py +159 -0
- karaoke_gen/pipeline/executors/remote.py +257 -0
- karaoke_gen/pipeline/stages/__init__.py +27 -0
- karaoke_gen/pipeline/stages/finalize.py +202 -0
- karaoke_gen/pipeline/stages/render.py +165 -0
- karaoke_gen/pipeline/stages/screens.py +139 -0
- karaoke_gen/pipeline/stages/separation.py +191 -0
- karaoke_gen/pipeline/stages/transcription.py +191 -0
- karaoke_gen/style_loader.py +531 -0
- karaoke_gen/utils/bulk_cli.py +6 -0
- karaoke_gen/utils/cli_args.py +424 -0
- karaoke_gen/utils/gen_cli.py +26 -261
- karaoke_gen/utils/remote_cli.py +1965 -0
- karaoke_gen/video_background_processor.py +351 -0
- karaoke_gen-0.71.27.dist-info/METADATA +610 -0
- karaoke_gen-0.71.27.dist-info/RECORD +275 -0
- {karaoke_gen-0.57.0.dist-info → karaoke_gen-0.71.27.dist-info}/WHEEL +1 -1
- {karaoke_gen-0.57.0.dist-info → karaoke_gen-0.71.27.dist-info}/entry_points.txt +1 -0
- lyrics_transcriber/__init__.py +10 -0
- lyrics_transcriber/cli/__init__.py +0 -0
- lyrics_transcriber/cli/cli_main.py +285 -0
- lyrics_transcriber/core/__init__.py +0 -0
- lyrics_transcriber/core/config.py +50 -0
- lyrics_transcriber/core/controller.py +520 -0
- lyrics_transcriber/correction/__init__.py +0 -0
- lyrics_transcriber/correction/agentic/__init__.py +9 -0
- lyrics_transcriber/correction/agentic/adapter.py +71 -0
- lyrics_transcriber/correction/agentic/agent.py +313 -0
- lyrics_transcriber/correction/agentic/feedback/aggregator.py +12 -0
- lyrics_transcriber/correction/agentic/feedback/collector.py +17 -0
- lyrics_transcriber/correction/agentic/feedback/retention.py +24 -0
- lyrics_transcriber/correction/agentic/feedback/store.py +76 -0
- lyrics_transcriber/correction/agentic/handlers/__init__.py +24 -0
- lyrics_transcriber/correction/agentic/handlers/ambiguous.py +44 -0
- lyrics_transcriber/correction/agentic/handlers/background_vocals.py +68 -0
- lyrics_transcriber/correction/agentic/handlers/base.py +51 -0
- lyrics_transcriber/correction/agentic/handlers/complex_multi_error.py +46 -0
- lyrics_transcriber/correction/agentic/handlers/extra_words.py +74 -0
- lyrics_transcriber/correction/agentic/handlers/no_error.py +42 -0
- lyrics_transcriber/correction/agentic/handlers/punctuation.py +44 -0
- lyrics_transcriber/correction/agentic/handlers/registry.py +60 -0
- lyrics_transcriber/correction/agentic/handlers/repeated_section.py +44 -0
- lyrics_transcriber/correction/agentic/handlers/sound_alike.py +126 -0
- lyrics_transcriber/correction/agentic/models/__init__.py +5 -0
- lyrics_transcriber/correction/agentic/models/ai_correction.py +31 -0
- lyrics_transcriber/correction/agentic/models/correction_session.py +30 -0
- lyrics_transcriber/correction/agentic/models/enums.py +38 -0
- lyrics_transcriber/correction/agentic/models/human_feedback.py +30 -0
- lyrics_transcriber/correction/agentic/models/learning_data.py +26 -0
- lyrics_transcriber/correction/agentic/models/observability_metrics.py +28 -0
- lyrics_transcriber/correction/agentic/models/schemas.py +46 -0
- lyrics_transcriber/correction/agentic/models/utils.py +19 -0
- lyrics_transcriber/correction/agentic/observability/__init__.py +5 -0
- lyrics_transcriber/correction/agentic/observability/langfuse_integration.py +35 -0
- lyrics_transcriber/correction/agentic/observability/metrics.py +46 -0
- lyrics_transcriber/correction/agentic/observability/performance.py +19 -0
- lyrics_transcriber/correction/agentic/prompts/__init__.py +2 -0
- lyrics_transcriber/correction/agentic/prompts/classifier.py +227 -0
- lyrics_transcriber/correction/agentic/providers/__init__.py +6 -0
- lyrics_transcriber/correction/agentic/providers/base.py +36 -0
- lyrics_transcriber/correction/agentic/providers/circuit_breaker.py +145 -0
- lyrics_transcriber/correction/agentic/providers/config.py +73 -0
- lyrics_transcriber/correction/agentic/providers/constants.py +24 -0
- lyrics_transcriber/correction/agentic/providers/health.py +28 -0
- lyrics_transcriber/correction/agentic/providers/langchain_bridge.py +212 -0
- lyrics_transcriber/correction/agentic/providers/model_factory.py +209 -0
- lyrics_transcriber/correction/agentic/providers/response_cache.py +218 -0
- lyrics_transcriber/correction/agentic/providers/response_parser.py +111 -0
- lyrics_transcriber/correction/agentic/providers/retry_executor.py +127 -0
- lyrics_transcriber/correction/agentic/router.py +35 -0
- lyrics_transcriber/correction/agentic/workflows/__init__.py +5 -0
- lyrics_transcriber/correction/agentic/workflows/consensus_workflow.py +24 -0
- lyrics_transcriber/correction/agentic/workflows/correction_graph.py +59 -0
- lyrics_transcriber/correction/agentic/workflows/feedback_workflow.py +24 -0
- lyrics_transcriber/correction/anchor_sequence.py +1043 -0
- lyrics_transcriber/correction/corrector.py +760 -0
- lyrics_transcriber/correction/feedback/__init__.py +2 -0
- lyrics_transcriber/correction/feedback/schemas.py +107 -0
- lyrics_transcriber/correction/feedback/store.py +236 -0
- lyrics_transcriber/correction/handlers/__init__.py +0 -0
- lyrics_transcriber/correction/handlers/base.py +52 -0
- lyrics_transcriber/correction/handlers/extend_anchor.py +149 -0
- lyrics_transcriber/correction/handlers/levenshtein.py +189 -0
- lyrics_transcriber/correction/handlers/llm.py +293 -0
- lyrics_transcriber/correction/handlers/llm_providers.py +60 -0
- lyrics_transcriber/correction/handlers/no_space_punct_match.py +154 -0
- lyrics_transcriber/correction/handlers/relaxed_word_count_match.py +85 -0
- lyrics_transcriber/correction/handlers/repeat.py +88 -0
- lyrics_transcriber/correction/handlers/sound_alike.py +259 -0
- lyrics_transcriber/correction/handlers/syllables_match.py +252 -0
- lyrics_transcriber/correction/handlers/word_count_match.py +80 -0
- lyrics_transcriber/correction/handlers/word_operations.py +187 -0
- lyrics_transcriber/correction/operations.py +352 -0
- lyrics_transcriber/correction/phrase_analyzer.py +435 -0
- lyrics_transcriber/correction/text_utils.py +30 -0
- lyrics_transcriber/frontend/.gitignore +23 -0
- lyrics_transcriber/frontend/.yarn/releases/yarn-4.7.0.cjs +935 -0
- lyrics_transcriber/frontend/.yarnrc.yml +3 -0
- lyrics_transcriber/frontend/README.md +50 -0
- lyrics_transcriber/frontend/REPLACE_ALL_FUNCTIONALITY.md +210 -0
- lyrics_transcriber/frontend/__init__.py +25 -0
- lyrics_transcriber/frontend/eslint.config.js +28 -0
- lyrics_transcriber/frontend/index.html +18 -0
- lyrics_transcriber/frontend/package.json +42 -0
- lyrics_transcriber/frontend/public/android-chrome-192x192.png +0 -0
- lyrics_transcriber/frontend/public/android-chrome-512x512.png +0 -0
- lyrics_transcriber/frontend/public/apple-touch-icon.png +0 -0
- lyrics_transcriber/frontend/public/favicon-16x16.png +0 -0
- lyrics_transcriber/frontend/public/favicon-32x32.png +0 -0
- lyrics_transcriber/frontend/public/favicon.ico +0 -0
- lyrics_transcriber/frontend/public/nomad-karaoke-logo.png +0 -0
- lyrics_transcriber/frontend/src/App.tsx +212 -0
- lyrics_transcriber/frontend/src/api.ts +239 -0
- lyrics_transcriber/frontend/src/components/AIFeedbackModal.tsx +77 -0
- lyrics_transcriber/frontend/src/components/AddLyricsModal.tsx +114 -0
- lyrics_transcriber/frontend/src/components/AgenticCorrectionMetrics.tsx +204 -0
- lyrics_transcriber/frontend/src/components/AudioPlayer.tsx +180 -0
- lyrics_transcriber/frontend/src/components/CorrectedWordWithActions.tsx +167 -0
- lyrics_transcriber/frontend/src/components/CorrectionAnnotationModal.tsx +359 -0
- lyrics_transcriber/frontend/src/components/CorrectionDetailCard.tsx +281 -0
- lyrics_transcriber/frontend/src/components/CorrectionMetrics.tsx +162 -0
- lyrics_transcriber/frontend/src/components/DurationTimelineView.tsx +257 -0
- lyrics_transcriber/frontend/src/components/EditActionBar.tsx +68 -0
- lyrics_transcriber/frontend/src/components/EditModal.tsx +702 -0
- lyrics_transcriber/frontend/src/components/EditTimelineSection.tsx +496 -0
- lyrics_transcriber/frontend/src/components/EditWordList.tsx +379 -0
- lyrics_transcriber/frontend/src/components/FileUpload.tsx +77 -0
- lyrics_transcriber/frontend/src/components/FindReplaceModal.tsx +467 -0
- lyrics_transcriber/frontend/src/components/Header.tsx +387 -0
- lyrics_transcriber/frontend/src/components/LyricsAnalyzer.tsx +1373 -0
- lyrics_transcriber/frontend/src/components/MetricsDashboard.tsx +51 -0
- lyrics_transcriber/frontend/src/components/ModeSelector.tsx +67 -0
- lyrics_transcriber/frontend/src/components/ModelSelector.tsx +23 -0
- lyrics_transcriber/frontend/src/components/PreviewVideoSection.tsx +144 -0
- lyrics_transcriber/frontend/src/components/ReferenceView.tsx +268 -0
- lyrics_transcriber/frontend/src/components/ReplaceAllLyricsModal.tsx +688 -0
- lyrics_transcriber/frontend/src/components/ReviewChangesModal.tsx +354 -0
- lyrics_transcriber/frontend/src/components/SegmentDetailsModal.tsx +64 -0
- lyrics_transcriber/frontend/src/components/TimelineEditor.tsx +376 -0
- lyrics_transcriber/frontend/src/components/TimingOffsetModal.tsx +131 -0
- lyrics_transcriber/frontend/src/components/TranscriptionView.tsx +256 -0
- lyrics_transcriber/frontend/src/components/WordDivider.tsx +187 -0
- lyrics_transcriber/frontend/src/components/shared/components/HighlightedText.tsx +379 -0
- lyrics_transcriber/frontend/src/components/shared/components/SourceSelector.tsx +56 -0
- lyrics_transcriber/frontend/src/components/shared/components/Word.tsx +87 -0
- lyrics_transcriber/frontend/src/components/shared/constants.ts +20 -0
- lyrics_transcriber/frontend/src/components/shared/hooks/useWordClick.ts +180 -0
- lyrics_transcriber/frontend/src/components/shared/styles.ts +13 -0
- lyrics_transcriber/frontend/src/components/shared/types.js +2 -0
- lyrics_transcriber/frontend/src/components/shared/types.ts +129 -0
- lyrics_transcriber/frontend/src/components/shared/utils/keyboardHandlers.ts +177 -0
- lyrics_transcriber/frontend/src/components/shared/utils/localStorage.ts +78 -0
- lyrics_transcriber/frontend/src/components/shared/utils/referenceLineCalculator.ts +75 -0
- lyrics_transcriber/frontend/src/components/shared/utils/segmentOperations.ts +360 -0
- lyrics_transcriber/frontend/src/components/shared/utils/timingUtils.ts +110 -0
- lyrics_transcriber/frontend/src/components/shared/utils/wordUtils.ts +22 -0
- lyrics_transcriber/frontend/src/hooks/useManualSync.ts +435 -0
- lyrics_transcriber/frontend/src/main.tsx +17 -0
- lyrics_transcriber/frontend/src/theme.ts +177 -0
- lyrics_transcriber/frontend/src/types/global.d.ts +9 -0
- lyrics_transcriber/frontend/src/types.js +2 -0
- lyrics_transcriber/frontend/src/types.ts +199 -0
- lyrics_transcriber/frontend/src/validation.ts +132 -0
- lyrics_transcriber/frontend/src/vite-env.d.ts +1 -0
- lyrics_transcriber/frontend/tsconfig.app.json +26 -0
- lyrics_transcriber/frontend/tsconfig.json +25 -0
- lyrics_transcriber/frontend/tsconfig.node.json +23 -0
- lyrics_transcriber/frontend/tsconfig.tsbuildinfo +1 -0
- lyrics_transcriber/frontend/update_version.js +11 -0
- lyrics_transcriber/frontend/vite.config.d.ts +2 -0
- lyrics_transcriber/frontend/vite.config.js +10 -0
- lyrics_transcriber/frontend/vite.config.ts +11 -0
- lyrics_transcriber/frontend/web_assets/android-chrome-192x192.png +0 -0
- lyrics_transcriber/frontend/web_assets/android-chrome-512x512.png +0 -0
- lyrics_transcriber/frontend/web_assets/apple-touch-icon.png +0 -0
- lyrics_transcriber/frontend/web_assets/assets/index-DdJTDWH3.js +42039 -0
- lyrics_transcriber/frontend/web_assets/assets/index-DdJTDWH3.js.map +1 -0
- lyrics_transcriber/frontend/web_assets/favicon-16x16.png +0 -0
- lyrics_transcriber/frontend/web_assets/favicon-32x32.png +0 -0
- lyrics_transcriber/frontend/web_assets/favicon.ico +0 -0
- lyrics_transcriber/frontend/web_assets/index.html +18 -0
- lyrics_transcriber/frontend/web_assets/nomad-karaoke-logo.png +0 -0
- lyrics_transcriber/frontend/yarn.lock +3752 -0
- lyrics_transcriber/lyrics/__init__.py +0 -0
- lyrics_transcriber/lyrics/base_lyrics_provider.py +211 -0
- lyrics_transcriber/lyrics/file_provider.py +95 -0
- lyrics_transcriber/lyrics/genius.py +384 -0
- lyrics_transcriber/lyrics/lrclib.py +231 -0
- lyrics_transcriber/lyrics/musixmatch.py +156 -0
- lyrics_transcriber/lyrics/spotify.py +290 -0
- lyrics_transcriber/lyrics/user_input_provider.py +44 -0
- lyrics_transcriber/output/__init__.py +0 -0
- lyrics_transcriber/output/ass/__init__.py +21 -0
- lyrics_transcriber/output/ass/ass.py +2088 -0
- lyrics_transcriber/output/ass/ass_specs.txt +732 -0
- lyrics_transcriber/output/ass/config.py +180 -0
- lyrics_transcriber/output/ass/constants.py +23 -0
- lyrics_transcriber/output/ass/event.py +94 -0
- lyrics_transcriber/output/ass/formatters.py +132 -0
- lyrics_transcriber/output/ass/lyrics_line.py +265 -0
- lyrics_transcriber/output/ass/lyrics_screen.py +252 -0
- lyrics_transcriber/output/ass/section_detector.py +89 -0
- lyrics_transcriber/output/ass/section_screen.py +106 -0
- lyrics_transcriber/output/ass/style.py +187 -0
- lyrics_transcriber/output/cdg.py +619 -0
- lyrics_transcriber/output/cdgmaker/__init__.py +0 -0
- lyrics_transcriber/output/cdgmaker/cdg.py +262 -0
- lyrics_transcriber/output/cdgmaker/composer.py +2260 -0
- lyrics_transcriber/output/cdgmaker/config.py +151 -0
- lyrics_transcriber/output/cdgmaker/images/instrumental.png +0 -0
- lyrics_transcriber/output/cdgmaker/images/intro.png +0 -0
- lyrics_transcriber/output/cdgmaker/pack.py +507 -0
- lyrics_transcriber/output/cdgmaker/render.py +346 -0
- lyrics_transcriber/output/cdgmaker/transitions/centertexttoplogobottomtext.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/circlein.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/circleout.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/fizzle.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/largecentertexttoplogo.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/rectangle.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/spiral.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/topleftmusicalnotes.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/wipein.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/wipeleft.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/wipeout.png +0 -0
- lyrics_transcriber/output/cdgmaker/transitions/wiperight.png +0 -0
- lyrics_transcriber/output/cdgmaker/utils.py +132 -0
- lyrics_transcriber/output/countdown_processor.py +267 -0
- lyrics_transcriber/output/fonts/AvenirNext-Bold.ttf +0 -0
- lyrics_transcriber/output/fonts/DMSans-VariableFont_opsz,wght.ttf +0 -0
- lyrics_transcriber/output/fonts/DMSerifDisplay-Regular.ttf +0 -0
- lyrics_transcriber/output/fonts/Oswald-SemiBold.ttf +0 -0
- lyrics_transcriber/output/fonts/Zurich_Cn_BT_Bold.ttf +0 -0
- lyrics_transcriber/output/fonts/arial.ttf +0 -0
- lyrics_transcriber/output/fonts/georgia.ttf +0 -0
- lyrics_transcriber/output/fonts/verdana.ttf +0 -0
- lyrics_transcriber/output/generator.py +257 -0
- lyrics_transcriber/output/lrc_to_cdg.py +61 -0
- lyrics_transcriber/output/lyrics_file.py +102 -0
- lyrics_transcriber/output/plain_text.py +96 -0
- lyrics_transcriber/output/segment_resizer.py +431 -0
- lyrics_transcriber/output/subtitles.py +397 -0
- lyrics_transcriber/output/video.py +544 -0
- lyrics_transcriber/review/__init__.py +0 -0
- lyrics_transcriber/review/server.py +676 -0
- lyrics_transcriber/storage/__init__.py +0 -0
- lyrics_transcriber/storage/dropbox.py +225 -0
- lyrics_transcriber/transcribers/__init__.py +0 -0
- lyrics_transcriber/transcribers/audioshake.py +290 -0
- lyrics_transcriber/transcribers/base_transcriber.py +157 -0
- lyrics_transcriber/transcribers/whisper.py +330 -0
- lyrics_transcriber/types.py +648 -0
- lyrics_transcriber/utils/__init__.py +0 -0
- lyrics_transcriber/utils/word_utils.py +27 -0
- karaoke_gen-0.57.0.dist-info/METADATA +0 -167
- karaoke_gen-0.57.0.dist-info/RECORD +0 -23
- {karaoke_gen-0.57.0.dist-info → karaoke_gen-0.71.27.dist-info/licenses}/LICENSE +0 -0
|
@@ -0,0 +1,1965 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
"""
|
|
3
|
+
Remote CLI for karaoke-gen - Submit jobs to a cloud-hosted backend.
|
|
4
|
+
|
|
5
|
+
This CLI provides the same interface as karaoke-gen but processes jobs on a cloud backend.
|
|
6
|
+
Set KARAOKE_GEN_URL environment variable to your cloud backend URL.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
karaoke-gen-remote <filepath> <artist> <title>
|
|
10
|
+
karaoke-gen-remote --resume <job_id>
|
|
11
|
+
karaoke-gen-remote --retry <job_id>
|
|
12
|
+
karaoke-gen-remote --list
|
|
13
|
+
karaoke-gen-remote --cancel <job_id>
|
|
14
|
+
karaoke-gen-remote --delete <job_id>
|
|
15
|
+
"""
|
|
16
|
+
# Suppress SyntaxWarnings from third-party dependencies (pydub, syrics)
|
|
17
|
+
# that have invalid escape sequences in regex patterns (not yet fixed for Python 3.12+)
|
|
18
|
+
import warnings
|
|
19
|
+
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pydub")
|
|
20
|
+
warnings.filterwarnings("ignore", category=SyntaxWarning, module="syrics")
|
|
21
|
+
|
|
22
|
+
import json
|
|
23
|
+
import logging
|
|
24
|
+
import os
|
|
25
|
+
import platform
|
|
26
|
+
import subprocess
|
|
27
|
+
import sys
|
|
28
|
+
import time
|
|
29
|
+
import urllib.parse
|
|
30
|
+
import webbrowser
|
|
31
|
+
from dataclasses import dataclass
|
|
32
|
+
from enum import Enum
|
|
33
|
+
from pathlib import Path
|
|
34
|
+
from typing import Any, Dict, Optional
|
|
35
|
+
|
|
36
|
+
import requests
|
|
37
|
+
|
|
38
|
+
from .cli_args import create_parser, process_style_overrides, is_url, is_file
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class JobStatus(str, Enum):
|
|
42
|
+
"""Job status values (matching backend)."""
|
|
43
|
+
PENDING = "pending"
|
|
44
|
+
DOWNLOADING = "downloading"
|
|
45
|
+
SEPARATING_STAGE1 = "separating_stage1"
|
|
46
|
+
SEPARATING_STAGE2 = "separating_stage2"
|
|
47
|
+
AUDIO_COMPLETE = "audio_complete"
|
|
48
|
+
TRANSCRIBING = "transcribing"
|
|
49
|
+
CORRECTING = "correcting"
|
|
50
|
+
LYRICS_COMPLETE = "lyrics_complete"
|
|
51
|
+
GENERATING_SCREENS = "generating_screens"
|
|
52
|
+
APPLYING_PADDING = "applying_padding"
|
|
53
|
+
AWAITING_REVIEW = "awaiting_review"
|
|
54
|
+
IN_REVIEW = "in_review"
|
|
55
|
+
REVIEW_COMPLETE = "review_complete"
|
|
56
|
+
RENDERING_VIDEO = "rendering_video"
|
|
57
|
+
AWAITING_INSTRUMENTAL_SELECTION = "awaiting_instrumental_selection"
|
|
58
|
+
INSTRUMENTAL_SELECTED = "instrumental_selected"
|
|
59
|
+
GENERATING_VIDEO = "generating_video"
|
|
60
|
+
ENCODING = "encoding"
|
|
61
|
+
PACKAGING = "packaging"
|
|
62
|
+
UPLOADING = "uploading"
|
|
63
|
+
NOTIFYING = "notifying"
|
|
64
|
+
COMPLETE = "complete"
|
|
65
|
+
FAILED = "failed"
|
|
66
|
+
CANCELLED = "cancelled"
|
|
67
|
+
ERROR = "error"
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
@dataclass
|
|
71
|
+
class Config:
|
|
72
|
+
"""Configuration for the remote CLI."""
|
|
73
|
+
service_url: str
|
|
74
|
+
review_ui_url: str
|
|
75
|
+
poll_interval: int
|
|
76
|
+
output_dir: str
|
|
77
|
+
auth_token: Optional[str] = None
|
|
78
|
+
non_interactive: bool = False # Auto-accept defaults for testing
|
|
79
|
+
# Job tracking metadata (sent as headers for filtering/tracking)
|
|
80
|
+
environment: str = "" # test/production/development
|
|
81
|
+
client_id: str = "" # Customer/user identifier
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class RemoteKaraokeClient:
|
|
85
|
+
"""Client for interacting with the karaoke-gen cloud backend."""
|
|
86
|
+
|
|
87
|
+
ALLOWED_AUDIO_EXTENSIONS = {'.mp3', '.wav', '.flac', '.m4a', '.ogg', '.aac'}
|
|
88
|
+
ALLOWED_IMAGE_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.gif', '.webp'}
|
|
89
|
+
ALLOWED_FONT_EXTENSIONS = {'.ttf', '.otf', '.woff', '.woff2'}
|
|
90
|
+
|
|
91
|
+
def __init__(self, config: Config, logger: logging.Logger):
|
|
92
|
+
self.config = config
|
|
93
|
+
self.logger = logger
|
|
94
|
+
self.session = requests.Session()
|
|
95
|
+
self._setup_auth()
|
|
96
|
+
|
|
97
|
+
def _setup_auth(self) -> None:
|
|
98
|
+
"""Set up authentication and tracking headers."""
|
|
99
|
+
if self.config.auth_token:
|
|
100
|
+
self.session.headers['Authorization'] = f'Bearer {self.config.auth_token}'
|
|
101
|
+
|
|
102
|
+
# Set up job tracking headers (used for filtering and operational management)
|
|
103
|
+
if self.config.environment:
|
|
104
|
+
self.session.headers['X-Environment'] = self.config.environment
|
|
105
|
+
if self.config.client_id:
|
|
106
|
+
self.session.headers['X-Client-ID'] = self.config.client_id
|
|
107
|
+
|
|
108
|
+
# Always include CLI version as user-agent
|
|
109
|
+
from importlib import metadata
|
|
110
|
+
try:
|
|
111
|
+
version = metadata.version("karaoke-gen")
|
|
112
|
+
except metadata.PackageNotFoundError:
|
|
113
|
+
version = "unknown"
|
|
114
|
+
self.session.headers['User-Agent'] = f'karaoke-gen-remote/{version}'
|
|
115
|
+
|
|
116
|
+
def _get_auth_token_from_gcloud(self) -> Optional[str]:
|
|
117
|
+
"""Get auth token from gcloud CLI."""
|
|
118
|
+
try:
|
|
119
|
+
result = subprocess.run(
|
|
120
|
+
['gcloud', 'auth', 'print-identity-token'],
|
|
121
|
+
capture_output=True,
|
|
122
|
+
text=True,
|
|
123
|
+
check=True
|
|
124
|
+
)
|
|
125
|
+
return result.stdout.strip()
|
|
126
|
+
except subprocess.CalledProcessError:
|
|
127
|
+
return None
|
|
128
|
+
except FileNotFoundError:
|
|
129
|
+
return None
|
|
130
|
+
|
|
131
|
+
def refresh_auth(self) -> bool:
|
|
132
|
+
"""Refresh authentication token."""
|
|
133
|
+
token = self._get_auth_token_from_gcloud()
|
|
134
|
+
if token:
|
|
135
|
+
self.config.auth_token = token
|
|
136
|
+
self.session.headers['Authorization'] = f'Bearer {token}'
|
|
137
|
+
return True
|
|
138
|
+
return False
|
|
139
|
+
|
|
140
|
+
def _request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
|
|
141
|
+
"""Make an authenticated request."""
|
|
142
|
+
url = f"{self.config.service_url}{endpoint}"
|
|
143
|
+
response = self.session.request(method, url, **kwargs)
|
|
144
|
+
return response
|
|
145
|
+
|
|
146
|
+
def _upload_file_to_signed_url(self, signed_url: str, file_path: str, content_type: str) -> bool:
|
|
147
|
+
"""
|
|
148
|
+
Upload a file directly to GCS using a signed URL.
|
|
149
|
+
|
|
150
|
+
Args:
|
|
151
|
+
signed_url: The signed URL from the backend
|
|
152
|
+
file_path: Local path to the file to upload
|
|
153
|
+
content_type: MIME type for the Content-Type header
|
|
154
|
+
|
|
155
|
+
Returns:
|
|
156
|
+
True if upload succeeded, False otherwise
|
|
157
|
+
"""
|
|
158
|
+
try:
|
|
159
|
+
with open(file_path, 'rb') as f:
|
|
160
|
+
# Use a fresh requests session (not self.session) because
|
|
161
|
+
# signed URLs should not have our auth headers
|
|
162
|
+
response = requests.put(
|
|
163
|
+
signed_url,
|
|
164
|
+
data=f,
|
|
165
|
+
headers={'Content-Type': content_type},
|
|
166
|
+
timeout=600 # 10 minutes for large files
|
|
167
|
+
)
|
|
168
|
+
|
|
169
|
+
if response.status_code in (200, 201):
|
|
170
|
+
return True
|
|
171
|
+
else:
|
|
172
|
+
self.logger.error(f"Failed to upload to signed URL: HTTP {response.status_code} - {response.text}")
|
|
173
|
+
return False
|
|
174
|
+
except Exception as e:
|
|
175
|
+
self.logger.error(f"Error uploading to signed URL: {e}")
|
|
176
|
+
return False
|
|
177
|
+
|
|
178
|
+
def _get_content_type(self, file_path: str) -> str:
|
|
179
|
+
"""Get the MIME content type for a file based on its extension."""
|
|
180
|
+
ext = Path(file_path).suffix.lower()
|
|
181
|
+
|
|
182
|
+
content_types = {
|
|
183
|
+
# Audio
|
|
184
|
+
'.mp3': 'audio/mpeg',
|
|
185
|
+
'.wav': 'audio/wav',
|
|
186
|
+
'.flac': 'audio/flac',
|
|
187
|
+
'.m4a': 'audio/mp4',
|
|
188
|
+
'.ogg': 'audio/ogg',
|
|
189
|
+
'.aac': 'audio/aac',
|
|
190
|
+
# Images
|
|
191
|
+
'.png': 'image/png',
|
|
192
|
+
'.jpg': 'image/jpeg',
|
|
193
|
+
'.jpeg': 'image/jpeg',
|
|
194
|
+
'.gif': 'image/gif',
|
|
195
|
+
'.webp': 'image/webp',
|
|
196
|
+
# Fonts
|
|
197
|
+
'.ttf': 'font/ttf',
|
|
198
|
+
'.otf': 'font/otf',
|
|
199
|
+
'.woff': 'font/woff',
|
|
200
|
+
'.woff2': 'font/woff2',
|
|
201
|
+
# Other
|
|
202
|
+
'.json': 'application/json',
|
|
203
|
+
'.txt': 'text/plain',
|
|
204
|
+
'.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
|
|
205
|
+
'.rtf': 'application/rtf',
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
return content_types.get(ext, 'application/octet-stream')
|
|
209
|
+
|
|
210
|
+
def _parse_style_params(self, style_params_path: str) -> Dict[str, str]:
|
|
211
|
+
"""
|
|
212
|
+
Parse style_params.json and extract file paths that need to be uploaded.
|
|
213
|
+
|
|
214
|
+
Returns a dict mapping asset_key -> local_file_path for files that exist.
|
|
215
|
+
"""
|
|
216
|
+
asset_files = {}
|
|
217
|
+
|
|
218
|
+
try:
|
|
219
|
+
with open(style_params_path, 'r') as f:
|
|
220
|
+
style_params = json.load(f)
|
|
221
|
+
except Exception as e:
|
|
222
|
+
self.logger.warning(f"Failed to parse style_params.json: {e}")
|
|
223
|
+
return asset_files
|
|
224
|
+
|
|
225
|
+
# Map of style param paths to asset keys
|
|
226
|
+
file_mappings = [
|
|
227
|
+
('intro', 'background_image', 'style_intro_background'),
|
|
228
|
+
('intro', 'font', 'style_font'),
|
|
229
|
+
('karaoke', 'background_image', 'style_karaoke_background'),
|
|
230
|
+
('karaoke', 'font_path', 'style_font'),
|
|
231
|
+
('end', 'background_image', 'style_end_background'),
|
|
232
|
+
('end', 'font', 'style_font'),
|
|
233
|
+
('cdg', 'font_path', 'style_font'),
|
|
234
|
+
('cdg', 'instrumental_background', 'style_cdg_instrumental_background'),
|
|
235
|
+
('cdg', 'title_screen_background', 'style_cdg_title_background'),
|
|
236
|
+
('cdg', 'outro_background', 'style_cdg_outro_background'),
|
|
237
|
+
]
|
|
238
|
+
|
|
239
|
+
for section, key, asset_key in file_mappings:
|
|
240
|
+
if section in style_params and key in style_params[section]:
|
|
241
|
+
file_path = style_params[section][key]
|
|
242
|
+
if file_path and os.path.isfile(file_path):
|
|
243
|
+
# Don't duplicate font uploads
|
|
244
|
+
if asset_key not in asset_files:
|
|
245
|
+
asset_files[asset_key] = file_path
|
|
246
|
+
self.logger.info(f" Found style asset: {asset_key} -> {file_path}")
|
|
247
|
+
|
|
248
|
+
return asset_files
|
|
249
|
+
|
|
250
|
+
def submit_job(
|
|
251
|
+
self,
|
|
252
|
+
filepath: str,
|
|
253
|
+
artist: str,
|
|
254
|
+
title: str,
|
|
255
|
+
style_params_path: Optional[str] = None,
|
|
256
|
+
enable_cdg: bool = True,
|
|
257
|
+
enable_txt: bool = True,
|
|
258
|
+
brand_prefix: Optional[str] = None,
|
|
259
|
+
discord_webhook_url: Optional[str] = None,
|
|
260
|
+
youtube_description: Optional[str] = None,
|
|
261
|
+
organised_dir_rclone_root: Optional[str] = None,
|
|
262
|
+
enable_youtube_upload: bool = False,
|
|
263
|
+
# Native API distribution (uses server-side credentials)
|
|
264
|
+
dropbox_path: Optional[str] = None,
|
|
265
|
+
gdrive_folder_id: Optional[str] = None,
|
|
266
|
+
# Lyrics configuration
|
|
267
|
+
lyrics_artist: Optional[str] = None,
|
|
268
|
+
lyrics_title: Optional[str] = None,
|
|
269
|
+
lyrics_file: Optional[str] = None,
|
|
270
|
+
subtitle_offset_ms: int = 0,
|
|
271
|
+
# Audio separation model configuration
|
|
272
|
+
clean_instrumental_model: Optional[str] = None,
|
|
273
|
+
backing_vocals_models: Optional[list] = None,
|
|
274
|
+
other_stems_models: Optional[list] = None,
|
|
275
|
+
# Existing instrumental (Batch 3)
|
|
276
|
+
existing_instrumental: Optional[str] = None,
|
|
277
|
+
) -> Dict[str, Any]:
|
|
278
|
+
"""
|
|
279
|
+
Submit a new karaoke generation job with optional style configuration.
|
|
280
|
+
|
|
281
|
+
Uses signed URL upload flow to bypass Cloud Run's 32MB request body limit:
|
|
282
|
+
1. Create job and get signed upload URLs from backend
|
|
283
|
+
2. Upload files directly to GCS using signed URLs
|
|
284
|
+
3. Notify backend that uploads are complete to start processing
|
|
285
|
+
|
|
286
|
+
Args:
|
|
287
|
+
filepath: Path to audio file
|
|
288
|
+
artist: Artist name
|
|
289
|
+
title: Song title
|
|
290
|
+
style_params_path: Path to style_params.json (optional)
|
|
291
|
+
enable_cdg: Generate CDG+MP3 package
|
|
292
|
+
enable_txt: Generate TXT+MP3 package
|
|
293
|
+
brand_prefix: Brand code prefix (e.g., "NOMAD")
|
|
294
|
+
discord_webhook_url: Discord webhook for notifications
|
|
295
|
+
youtube_description: YouTube video description
|
|
296
|
+
organised_dir_rclone_root: Legacy rclone path (deprecated)
|
|
297
|
+
enable_youtube_upload: Enable YouTube upload
|
|
298
|
+
dropbox_path: Dropbox folder path for organized output (native API)
|
|
299
|
+
gdrive_folder_id: Google Drive folder ID for public share (native API)
|
|
300
|
+
lyrics_artist: Override artist name for lyrics search
|
|
301
|
+
lyrics_title: Override title for lyrics search
|
|
302
|
+
lyrics_file: Path to user-provided lyrics file
|
|
303
|
+
subtitle_offset_ms: Subtitle timing offset in milliseconds
|
|
304
|
+
clean_instrumental_model: Model for clean instrumental separation
|
|
305
|
+
backing_vocals_models: List of models for backing vocals separation
|
|
306
|
+
other_stems_models: List of models for other stems (bass, drums, etc.)
|
|
307
|
+
existing_instrumental: Path to existing instrumental file to use instead of AI separation
|
|
308
|
+
"""
|
|
309
|
+
file_path = Path(filepath)
|
|
310
|
+
|
|
311
|
+
if not file_path.exists():
|
|
312
|
+
raise FileNotFoundError(f"File not found: {filepath}")
|
|
313
|
+
|
|
314
|
+
ext = file_path.suffix.lower()
|
|
315
|
+
if ext not in self.ALLOWED_AUDIO_EXTENSIONS:
|
|
316
|
+
raise ValueError(
|
|
317
|
+
f"Unsupported file type: {ext}. "
|
|
318
|
+
f"Allowed: {', '.join(self.ALLOWED_AUDIO_EXTENSIONS)}"
|
|
319
|
+
)
|
|
320
|
+
|
|
321
|
+
# Step 1: Build list of files to upload
|
|
322
|
+
files_info = []
|
|
323
|
+
local_files = {} # file_type -> local_path
|
|
324
|
+
|
|
325
|
+
# Main audio file
|
|
326
|
+
audio_content_type = self._get_content_type(filepath)
|
|
327
|
+
files_info.append({
|
|
328
|
+
'filename': file_path.name,
|
|
329
|
+
'content_type': audio_content_type,
|
|
330
|
+
'file_type': 'audio'
|
|
331
|
+
})
|
|
332
|
+
local_files['audio'] = filepath
|
|
333
|
+
self.logger.info(f"Will upload audio: {filepath}")
|
|
334
|
+
|
|
335
|
+
# Parse style params and find referenced files
|
|
336
|
+
style_assets = {}
|
|
337
|
+
if style_params_path and os.path.isfile(style_params_path):
|
|
338
|
+
self.logger.info(f"Parsing style configuration: {style_params_path}")
|
|
339
|
+
style_assets = self._parse_style_params(style_params_path)
|
|
340
|
+
|
|
341
|
+
# Add style_params.json
|
|
342
|
+
files_info.append({
|
|
343
|
+
'filename': Path(style_params_path).name,
|
|
344
|
+
'content_type': 'application/json',
|
|
345
|
+
'file_type': 'style_params'
|
|
346
|
+
})
|
|
347
|
+
local_files['style_params'] = style_params_path
|
|
348
|
+
self.logger.info(f" Will upload style_params.json")
|
|
349
|
+
|
|
350
|
+
# Add each style asset file
|
|
351
|
+
for asset_key, asset_path in style_assets.items():
|
|
352
|
+
if os.path.isfile(asset_path):
|
|
353
|
+
content_type = self._get_content_type(asset_path)
|
|
354
|
+
files_info.append({
|
|
355
|
+
'filename': Path(asset_path).name,
|
|
356
|
+
'content_type': content_type,
|
|
357
|
+
'file_type': asset_key # e.g., 'style_intro_background'
|
|
358
|
+
})
|
|
359
|
+
local_files[asset_key] = asset_path
|
|
360
|
+
self.logger.info(f" Will upload {asset_key}: {asset_path}")
|
|
361
|
+
|
|
362
|
+
# Add lyrics file if provided
|
|
363
|
+
if lyrics_file and os.path.isfile(lyrics_file):
|
|
364
|
+
content_type = self._get_content_type(lyrics_file)
|
|
365
|
+
files_info.append({
|
|
366
|
+
'filename': Path(lyrics_file).name,
|
|
367
|
+
'content_type': content_type,
|
|
368
|
+
'file_type': 'lyrics_file'
|
|
369
|
+
})
|
|
370
|
+
local_files['lyrics_file'] = lyrics_file
|
|
371
|
+
self.logger.info(f"Will upload lyrics file: {lyrics_file}")
|
|
372
|
+
|
|
373
|
+
# Add existing instrumental file if provided (Batch 3)
|
|
374
|
+
if existing_instrumental and os.path.isfile(existing_instrumental):
|
|
375
|
+
content_type = self._get_content_type(existing_instrumental)
|
|
376
|
+
files_info.append({
|
|
377
|
+
'filename': Path(existing_instrumental).name,
|
|
378
|
+
'content_type': content_type,
|
|
379
|
+
'file_type': 'existing_instrumental'
|
|
380
|
+
})
|
|
381
|
+
local_files['existing_instrumental'] = existing_instrumental
|
|
382
|
+
self.logger.info(f"Will upload existing instrumental: {existing_instrumental}")
|
|
383
|
+
|
|
384
|
+
# Step 2: Create job and get signed upload URLs
|
|
385
|
+
self.logger.info(f"Creating job at {self.config.service_url}/api/jobs/create-with-upload-urls")
|
|
386
|
+
|
|
387
|
+
create_request = {
|
|
388
|
+
'artist': artist,
|
|
389
|
+
'title': title,
|
|
390
|
+
'files': files_info,
|
|
391
|
+
'enable_cdg': enable_cdg,
|
|
392
|
+
'enable_txt': enable_txt,
|
|
393
|
+
}
|
|
394
|
+
|
|
395
|
+
if brand_prefix:
|
|
396
|
+
create_request['brand_prefix'] = brand_prefix
|
|
397
|
+
if discord_webhook_url:
|
|
398
|
+
create_request['discord_webhook_url'] = discord_webhook_url
|
|
399
|
+
if youtube_description:
|
|
400
|
+
create_request['youtube_description'] = youtube_description
|
|
401
|
+
if enable_youtube_upload:
|
|
402
|
+
create_request['enable_youtube_upload'] = enable_youtube_upload
|
|
403
|
+
if dropbox_path:
|
|
404
|
+
create_request['dropbox_path'] = dropbox_path
|
|
405
|
+
if gdrive_folder_id:
|
|
406
|
+
create_request['gdrive_folder_id'] = gdrive_folder_id
|
|
407
|
+
if organised_dir_rclone_root:
|
|
408
|
+
create_request['organised_dir_rclone_root'] = organised_dir_rclone_root
|
|
409
|
+
if lyrics_artist:
|
|
410
|
+
create_request['lyrics_artist'] = lyrics_artist
|
|
411
|
+
if lyrics_title:
|
|
412
|
+
create_request['lyrics_title'] = lyrics_title
|
|
413
|
+
if subtitle_offset_ms != 0:
|
|
414
|
+
create_request['subtitle_offset_ms'] = subtitle_offset_ms
|
|
415
|
+
if clean_instrumental_model:
|
|
416
|
+
create_request['clean_instrumental_model'] = clean_instrumental_model
|
|
417
|
+
if backing_vocals_models:
|
|
418
|
+
create_request['backing_vocals_models'] = backing_vocals_models
|
|
419
|
+
if other_stems_models:
|
|
420
|
+
create_request['other_stems_models'] = other_stems_models
|
|
421
|
+
|
|
422
|
+
response = self._request('POST', '/api/jobs/create-with-upload-urls', json=create_request)
|
|
423
|
+
|
|
424
|
+
if response.status_code != 200:
|
|
425
|
+
try:
|
|
426
|
+
error_detail = response.json()
|
|
427
|
+
except Exception:
|
|
428
|
+
error_detail = response.text
|
|
429
|
+
raise RuntimeError(f"Error creating job: {error_detail}")
|
|
430
|
+
|
|
431
|
+
create_result = response.json()
|
|
432
|
+
if create_result.get('status') != 'success':
|
|
433
|
+
raise RuntimeError(f"Error creating job: {create_result}")
|
|
434
|
+
|
|
435
|
+
job_id = create_result['job_id']
|
|
436
|
+
upload_urls = create_result['upload_urls']
|
|
437
|
+
|
|
438
|
+
self.logger.info(f"Job {job_id} created. Uploading {len(upload_urls)} files directly to storage...")
|
|
439
|
+
|
|
440
|
+
# Step 3: Upload each file directly to GCS using signed URLs
|
|
441
|
+
uploaded_files = []
|
|
442
|
+
for url_info in upload_urls:
|
|
443
|
+
file_type = url_info['file_type']
|
|
444
|
+
signed_url = url_info['upload_url']
|
|
445
|
+
content_type = url_info['content_type']
|
|
446
|
+
local_path = local_files.get(file_type)
|
|
447
|
+
|
|
448
|
+
if not local_path:
|
|
449
|
+
self.logger.warning(f"No local file found for file_type: {file_type}")
|
|
450
|
+
continue
|
|
451
|
+
|
|
452
|
+
# Calculate file size for logging
|
|
453
|
+
file_size = os.path.getsize(local_path)
|
|
454
|
+
file_size_mb = file_size / (1024 * 1024)
|
|
455
|
+
self.logger.info(f" Uploading {file_type} ({file_size_mb:.1f} MB)...")
|
|
456
|
+
|
|
457
|
+
success = self._upload_file_to_signed_url(signed_url, local_path, content_type)
|
|
458
|
+
if not success:
|
|
459
|
+
raise RuntimeError(f"Failed to upload {file_type} to storage")
|
|
460
|
+
|
|
461
|
+
uploaded_files.append(file_type)
|
|
462
|
+
self.logger.info(f" ✓ Uploaded {file_type}")
|
|
463
|
+
|
|
464
|
+
# Step 4: Notify backend that uploads are complete
|
|
465
|
+
self.logger.info(f"Notifying backend that uploads are complete...")
|
|
466
|
+
|
|
467
|
+
complete_request = {
|
|
468
|
+
'uploaded_files': uploaded_files
|
|
469
|
+
}
|
|
470
|
+
|
|
471
|
+
response = self._request('POST', f'/api/jobs/{job_id}/uploads-complete', json=complete_request)
|
|
472
|
+
|
|
473
|
+
if response.status_code != 200:
|
|
474
|
+
try:
|
|
475
|
+
error_detail = response.json()
|
|
476
|
+
except Exception:
|
|
477
|
+
error_detail = response.text
|
|
478
|
+
raise RuntimeError(f"Error completing uploads: {error_detail}")
|
|
479
|
+
|
|
480
|
+
result = response.json()
|
|
481
|
+
if result.get('status') != 'success':
|
|
482
|
+
raise RuntimeError(f"Error completing uploads: {result}")
|
|
483
|
+
|
|
484
|
+
# Log distribution services info if available
|
|
485
|
+
if 'distribution_services' in result:
|
|
486
|
+
dist_services = result['distribution_services']
|
|
487
|
+
self.logger.info("")
|
|
488
|
+
self.logger.info("Distribution Services:")
|
|
489
|
+
|
|
490
|
+
for service_name, service_info in dist_services.items():
|
|
491
|
+
if service_info.get('enabled'):
|
|
492
|
+
status = "✓" if service_info.get('credentials_valid', True) else "✗"
|
|
493
|
+
default_note = " (default)" if service_info.get('using_default') else ""
|
|
494
|
+
|
|
495
|
+
if service_name == 'dropbox':
|
|
496
|
+
path = service_info.get('path', '')
|
|
497
|
+
self.logger.info(f" {status} Dropbox: {path}{default_note}")
|
|
498
|
+
elif service_name == 'gdrive':
|
|
499
|
+
folder_id = service_info.get('folder_id', '')
|
|
500
|
+
self.logger.info(f" {status} Google Drive: folder {folder_id}{default_note}")
|
|
501
|
+
elif service_name == 'youtube':
|
|
502
|
+
self.logger.info(f" {status} YouTube: enabled")
|
|
503
|
+
elif service_name == 'discord':
|
|
504
|
+
self.logger.info(f" {status} Discord: notifications{default_note}")
|
|
505
|
+
|
|
506
|
+
return result
|
|
507
|
+
|
|
508
|
+
def get_job(self, job_id: str) -> Dict[str, Any]:
|
|
509
|
+
"""Get job status and details."""
|
|
510
|
+
response = self._request('GET', f'/api/jobs/{job_id}')
|
|
511
|
+
if response.status_code == 404:
|
|
512
|
+
raise ValueError(f"Job not found: {job_id}")
|
|
513
|
+
if response.status_code != 200:
|
|
514
|
+
raise RuntimeError(f"Error getting job: {response.text}")
|
|
515
|
+
return response.json()
|
|
516
|
+
|
|
517
|
+
def cancel_job(self, job_id: str, reason: str = "User requested") -> Dict[str, Any]:
|
|
518
|
+
"""Cancel a running job. Stops processing but keeps the job record."""
|
|
519
|
+
response = self._request(
|
|
520
|
+
'POST',
|
|
521
|
+
f'/api/jobs/{job_id}/cancel',
|
|
522
|
+
json={'reason': reason}
|
|
523
|
+
)
|
|
524
|
+
if response.status_code == 404:
|
|
525
|
+
raise ValueError(f"Job not found: {job_id}")
|
|
526
|
+
if response.status_code == 400:
|
|
527
|
+
try:
|
|
528
|
+
error_detail = response.json().get('detail', response.text)
|
|
529
|
+
except Exception:
|
|
530
|
+
error_detail = response.text
|
|
531
|
+
raise RuntimeError(f"Cannot cancel job: {error_detail}")
|
|
532
|
+
if response.status_code != 200:
|
|
533
|
+
raise RuntimeError(f"Error cancelling job: {response.text}")
|
|
534
|
+
return response.json()
|
|
535
|
+
|
|
536
|
+
def delete_job(self, job_id: str, delete_files: bool = True) -> Dict[str, Any]:
|
|
537
|
+
"""Delete a job and optionally its files. Permanent removal."""
|
|
538
|
+
response = self._request(
|
|
539
|
+
'DELETE',
|
|
540
|
+
f'/api/jobs/{job_id}',
|
|
541
|
+
params={'delete_files': str(delete_files).lower()}
|
|
542
|
+
)
|
|
543
|
+
if response.status_code == 404:
|
|
544
|
+
raise ValueError(f"Job not found: {job_id}")
|
|
545
|
+
if response.status_code != 200:
|
|
546
|
+
raise RuntimeError(f"Error deleting job: {response.text}")
|
|
547
|
+
return response.json()
|
|
548
|
+
|
|
549
|
+
def retry_job(self, job_id: str) -> Dict[str, Any]:
|
|
550
|
+
"""Retry a failed job from the last successful checkpoint."""
|
|
551
|
+
response = self._request(
|
|
552
|
+
'POST',
|
|
553
|
+
f'/api/jobs/{job_id}/retry'
|
|
554
|
+
)
|
|
555
|
+
if response.status_code == 404:
|
|
556
|
+
raise ValueError(f"Job not found: {job_id}")
|
|
557
|
+
if response.status_code == 400:
|
|
558
|
+
try:
|
|
559
|
+
error_detail = response.json().get('detail', response.text)
|
|
560
|
+
except Exception:
|
|
561
|
+
error_detail = response.text
|
|
562
|
+
raise RuntimeError(f"Cannot retry job: {error_detail}")
|
|
563
|
+
if response.status_code != 200:
|
|
564
|
+
raise RuntimeError(f"Error retrying job: {response.text}")
|
|
565
|
+
return response.json()
|
|
566
|
+
|
|
567
|
+
def list_jobs(
|
|
568
|
+
self,
|
|
569
|
+
status: Optional[str] = None,
|
|
570
|
+
environment: Optional[str] = None,
|
|
571
|
+
client_id: Optional[str] = None,
|
|
572
|
+
limit: int = 100
|
|
573
|
+
) -> list:
|
|
574
|
+
"""
|
|
575
|
+
List all jobs with optional filters.
|
|
576
|
+
|
|
577
|
+
Args:
|
|
578
|
+
status: Filter by job status
|
|
579
|
+
environment: Filter by request_metadata.environment
|
|
580
|
+
client_id: Filter by request_metadata.client_id
|
|
581
|
+
limit: Maximum number of jobs to return
|
|
582
|
+
"""
|
|
583
|
+
params = {'limit': limit}
|
|
584
|
+
if status:
|
|
585
|
+
params['status'] = status
|
|
586
|
+
if environment:
|
|
587
|
+
params['environment'] = environment
|
|
588
|
+
if client_id:
|
|
589
|
+
params['client_id'] = client_id
|
|
590
|
+
response = self._request('GET', '/api/jobs', params=params)
|
|
591
|
+
if response.status_code != 200:
|
|
592
|
+
raise RuntimeError(f"Error listing jobs: {response.text}")
|
|
593
|
+
return response.json()
|
|
594
|
+
|
|
595
|
+
def bulk_delete_jobs(
|
|
596
|
+
self,
|
|
597
|
+
environment: Optional[str] = None,
|
|
598
|
+
client_id: Optional[str] = None,
|
|
599
|
+
status: Optional[str] = None,
|
|
600
|
+
confirm: bool = False,
|
|
601
|
+
delete_files: bool = True
|
|
602
|
+
) -> Dict[str, Any]:
|
|
603
|
+
"""
|
|
604
|
+
Delete multiple jobs matching filter criteria.
|
|
605
|
+
|
|
606
|
+
Args:
|
|
607
|
+
environment: Delete jobs with this environment
|
|
608
|
+
client_id: Delete jobs from this client
|
|
609
|
+
status: Delete jobs with this status
|
|
610
|
+
confirm: Must be True to execute deletion
|
|
611
|
+
delete_files: Also delete GCS files
|
|
612
|
+
|
|
613
|
+
Returns:
|
|
614
|
+
Dict with deletion results or preview
|
|
615
|
+
"""
|
|
616
|
+
params = {
|
|
617
|
+
'confirm': str(confirm).lower(),
|
|
618
|
+
'delete_files': str(delete_files).lower(),
|
|
619
|
+
}
|
|
620
|
+
if environment:
|
|
621
|
+
params['environment'] = environment
|
|
622
|
+
if client_id:
|
|
623
|
+
params['client_id'] = client_id
|
|
624
|
+
if status:
|
|
625
|
+
params['status'] = status
|
|
626
|
+
|
|
627
|
+
response = self._request('DELETE', '/api/jobs', params=params)
|
|
628
|
+
if response.status_code == 400:
|
|
629
|
+
try:
|
|
630
|
+
error_detail = response.json().get('detail', response.text)
|
|
631
|
+
except Exception:
|
|
632
|
+
error_detail = response.text
|
|
633
|
+
raise RuntimeError(f"Error: {error_detail}")
|
|
634
|
+
if response.status_code != 200:
|
|
635
|
+
raise RuntimeError(f"Error bulk deleting jobs: {response.text}")
|
|
636
|
+
return response.json()
|
|
637
|
+
|
|
638
|
+
def get_instrumental_options(self, job_id: str) -> Dict[str, Any]:
|
|
639
|
+
"""Get instrumental options for selection."""
|
|
640
|
+
response = self._request('GET', f'/api/jobs/{job_id}/instrumental-options')
|
|
641
|
+
if response.status_code != 200:
|
|
642
|
+
try:
|
|
643
|
+
error_detail = response.json()
|
|
644
|
+
except Exception:
|
|
645
|
+
error_detail = response.text
|
|
646
|
+
raise RuntimeError(f"Error getting instrumental options: {error_detail}")
|
|
647
|
+
return response.json()
|
|
648
|
+
|
|
649
|
+
def select_instrumental(self, job_id: str, selection: str) -> Dict[str, Any]:
|
|
650
|
+
"""Submit instrumental selection."""
|
|
651
|
+
response = self._request(
|
|
652
|
+
'POST',
|
|
653
|
+
f'/api/jobs/{job_id}/select-instrumental',
|
|
654
|
+
json={'selection': selection}
|
|
655
|
+
)
|
|
656
|
+
if response.status_code != 200:
|
|
657
|
+
try:
|
|
658
|
+
error_detail = response.json()
|
|
659
|
+
except Exception:
|
|
660
|
+
error_detail = response.text
|
|
661
|
+
raise RuntimeError(f"Error selecting instrumental: {error_detail}")
|
|
662
|
+
return response.json()
|
|
663
|
+
|
|
664
|
+
def get_download_urls(self, job_id: str) -> Dict[str, Any]:
|
|
665
|
+
"""Get signed download URLs for all job output files."""
|
|
666
|
+
response = self._request('GET', f'/api/jobs/{job_id}/download-urls')
|
|
667
|
+
if response.status_code != 200:
|
|
668
|
+
try:
|
|
669
|
+
error_detail = response.json()
|
|
670
|
+
except Exception:
|
|
671
|
+
error_detail = response.text
|
|
672
|
+
raise RuntimeError(f"Error getting download URLs: {error_detail}")
|
|
673
|
+
return response.json()
|
|
674
|
+
|
|
675
|
+
def download_file_via_url(self, url: str, local_path: str) -> bool:
|
|
676
|
+
"""Download file from a URL via HTTP."""
|
|
677
|
+
try:
|
|
678
|
+
# Handle relative URLs by prepending service URL
|
|
679
|
+
if url.startswith('/'):
|
|
680
|
+
url = f"{self.config.service_url}{url}"
|
|
681
|
+
|
|
682
|
+
response = requests.get(url, stream=True, timeout=600)
|
|
683
|
+
if response.status_code != 200:
|
|
684
|
+
return False
|
|
685
|
+
|
|
686
|
+
# Ensure parent directory exists
|
|
687
|
+
os.makedirs(os.path.dirname(local_path), exist_ok=True)
|
|
688
|
+
|
|
689
|
+
with open(local_path, 'wb') as f:
|
|
690
|
+
for chunk in response.iter_content(chunk_size=8192):
|
|
691
|
+
f.write(chunk)
|
|
692
|
+
return True
|
|
693
|
+
except Exception:
|
|
694
|
+
return False
|
|
695
|
+
|
|
696
|
+
def download_file_via_gsutil(self, gcs_path: str, local_path: str) -> bool:
|
|
697
|
+
"""Download file from GCS using gsutil (fallback method)."""
|
|
698
|
+
try:
|
|
699
|
+
bucket_name = os.environ.get('KARAOKE_GEN_BUCKET', 'karaoke-gen-storage-nomadkaraoke')
|
|
700
|
+
gcs_uri = f"gs://{bucket_name}/{gcs_path}"
|
|
701
|
+
|
|
702
|
+
result = subprocess.run(
|
|
703
|
+
['gsutil', 'cp', gcs_uri, local_path],
|
|
704
|
+
capture_output=True,
|
|
705
|
+
text=True
|
|
706
|
+
)
|
|
707
|
+
return result.returncode == 0
|
|
708
|
+
except FileNotFoundError:
|
|
709
|
+
return False
|
|
710
|
+
|
|
711
|
+
def get_worker_logs(self, job_id: str, since_index: int = 0) -> Dict[str, Any]:
|
|
712
|
+
"""
|
|
713
|
+
Get worker logs for debugging.
|
|
714
|
+
|
|
715
|
+
Args:
|
|
716
|
+
job_id: Job ID
|
|
717
|
+
since_index: Return only logs after this index (for pagination/polling)
|
|
718
|
+
|
|
719
|
+
Returns:
|
|
720
|
+
{
|
|
721
|
+
"logs": [{"timestamp": "...", "level": "INFO", "worker": "audio", "message": "..."}],
|
|
722
|
+
"next_index": 42,
|
|
723
|
+
"total_logs": 42
|
|
724
|
+
}
|
|
725
|
+
"""
|
|
726
|
+
response = self._request(
|
|
727
|
+
'GET',
|
|
728
|
+
f'/api/jobs/{job_id}/logs',
|
|
729
|
+
params={'since_index': since_index}
|
|
730
|
+
)
|
|
731
|
+
if response.status_code != 200:
|
|
732
|
+
return {"logs": [], "next_index": since_index, "total_logs": 0}
|
|
733
|
+
return response.json()
|
|
734
|
+
|
|
735
|
+
def get_review_data(self, job_id: str) -> Dict[str, Any]:
|
|
736
|
+
"""Get the current review/correction data for a job."""
|
|
737
|
+
response = self._request('GET', f'/api/review/{job_id}/correction-data')
|
|
738
|
+
if response.status_code != 200:
|
|
739
|
+
try:
|
|
740
|
+
error_detail = response.json()
|
|
741
|
+
except Exception:
|
|
742
|
+
error_detail = response.text
|
|
743
|
+
raise RuntimeError(f"Error getting review data: {error_detail}")
|
|
744
|
+
return response.json()
|
|
745
|
+
|
|
746
|
+
def complete_review(self, job_id: str, updated_data: Dict[str, Any]) -> Dict[str, Any]:
|
|
747
|
+
"""Submit the review completion with corrected data."""
|
|
748
|
+
response = self._request(
|
|
749
|
+
'POST',
|
|
750
|
+
f'/api/review/{job_id}/complete',
|
|
751
|
+
json=updated_data
|
|
752
|
+
)
|
|
753
|
+
if response.status_code != 200:
|
|
754
|
+
try:
|
|
755
|
+
error_detail = response.json()
|
|
756
|
+
except Exception:
|
|
757
|
+
error_detail = response.text
|
|
758
|
+
raise RuntimeError(f"Error completing review: {error_detail}")
|
|
759
|
+
return response.json()
|
|
760
|
+
|
|
761
|
+
|
|
762
|
+
class JobMonitor:
|
|
763
|
+
"""Monitor job progress with verbose logging."""
|
|
764
|
+
|
|
765
|
+
def __init__(self, client: RemoteKaraokeClient, config: Config, logger: logging.Logger):
|
|
766
|
+
self.client = client
|
|
767
|
+
self.config = config
|
|
768
|
+
self.logger = logger
|
|
769
|
+
self._review_opened = False
|
|
770
|
+
self._instrumental_prompted = False
|
|
771
|
+
self._last_timeline_index = 0
|
|
772
|
+
self._last_log_index = 0
|
|
773
|
+
self._show_worker_logs = True # Enable worker log display
|
|
774
|
+
self._polls_without_updates = 0 # Track polling activity for heartbeat
|
|
775
|
+
self._heartbeat_interval = 6 # Show heartbeat every N polls without updates (~30s with 5s poll)
|
|
776
|
+
|
|
777
|
+
# Status descriptions for user-friendly logging
|
|
778
|
+
STATUS_DESCRIPTIONS = {
|
|
779
|
+
'pending': 'Job queued, waiting to start',
|
|
780
|
+
'downloading': 'Downloading and preparing input files',
|
|
781
|
+
'separating_stage1': 'AI audio separation (stage 1 of 2)',
|
|
782
|
+
'separating_stage2': 'AI audio separation (stage 2 of 2)',
|
|
783
|
+
'audio_complete': 'Audio separation complete',
|
|
784
|
+
'transcribing': 'Transcribing lyrics from audio',
|
|
785
|
+
'correcting': 'Auto-correcting lyrics against reference sources',
|
|
786
|
+
'lyrics_complete': 'Lyrics processing complete',
|
|
787
|
+
'generating_screens': 'Creating title and end screens',
|
|
788
|
+
'applying_padding': 'Adding intro/outro padding',
|
|
789
|
+
'awaiting_review': 'Waiting for lyrics review',
|
|
790
|
+
'in_review': 'Lyrics review in progress',
|
|
791
|
+
'review_complete': 'Review complete, preparing video render',
|
|
792
|
+
'rendering_video': 'Rendering karaoke video with lyrics',
|
|
793
|
+
'awaiting_instrumental_selection': 'Waiting for instrumental selection',
|
|
794
|
+
'instrumental_selected': 'Instrumental selected, preparing final encoding',
|
|
795
|
+
'generating_video': 'Downloading files for final video encoding',
|
|
796
|
+
'encoding': 'Encoding final videos (15-20 min, 4 formats)',
|
|
797
|
+
'packaging': 'Creating CDG/TXT packages',
|
|
798
|
+
'uploading': 'Uploading to distribution services',
|
|
799
|
+
'notifying': 'Sending notifications',
|
|
800
|
+
'complete': 'All processing complete',
|
|
801
|
+
'failed': 'Job failed',
|
|
802
|
+
'cancelled': 'Job cancelled',
|
|
803
|
+
}
|
|
804
|
+
|
|
805
|
+
def _get_status_description(self, status: str) -> str:
|
|
806
|
+
"""Get user-friendly description for a status."""
|
|
807
|
+
return self.STATUS_DESCRIPTIONS.get(status, status)
|
|
808
|
+
|
|
809
|
+
def open_browser(self, url: str) -> None:
|
|
810
|
+
"""Open URL in the default browser."""
|
|
811
|
+
system = platform.system()
|
|
812
|
+
try:
|
|
813
|
+
if system == 'Darwin':
|
|
814
|
+
subprocess.run(['open', url], check=True)
|
|
815
|
+
elif system == 'Linux':
|
|
816
|
+
subprocess.run(['xdg-open', url], check=True, stderr=subprocess.DEVNULL)
|
|
817
|
+
else:
|
|
818
|
+
webbrowser.open(url)
|
|
819
|
+
except Exception:
|
|
820
|
+
self.logger.info(f"Please open in browser: {url}")
|
|
821
|
+
|
|
822
|
+
def open_review_ui(self, job_id: str) -> None:
|
|
823
|
+
"""Open the lyrics review UI in browser."""
|
|
824
|
+
# Build the review URL with the API endpoint
|
|
825
|
+
base_api_url = f"{self.config.service_url}/api/review/{job_id}"
|
|
826
|
+
encoded_api_url = urllib.parse.quote(base_api_url, safe='')
|
|
827
|
+
|
|
828
|
+
# Try to get audio hash from job data
|
|
829
|
+
try:
|
|
830
|
+
job_data = self.client.get_job(job_id)
|
|
831
|
+
audio_hash = job_data.get('audio_hash', '')
|
|
832
|
+
except Exception:
|
|
833
|
+
audio_hash = ''
|
|
834
|
+
|
|
835
|
+
url = f"{self.config.review_ui_url}/?baseApiUrl={encoded_api_url}"
|
|
836
|
+
if audio_hash:
|
|
837
|
+
url += f"&audioHash={audio_hash}"
|
|
838
|
+
|
|
839
|
+
self.logger.info(f"Opening lyrics review UI: {url}")
|
|
840
|
+
self.open_browser(url)
|
|
841
|
+
|
|
842
|
+
def handle_review(self, job_id: str) -> None:
|
|
843
|
+
"""Handle the lyrics review interaction."""
|
|
844
|
+
self.logger.info("=" * 60)
|
|
845
|
+
self.logger.info("LYRICS REVIEW NEEDED")
|
|
846
|
+
self.logger.info("=" * 60)
|
|
847
|
+
|
|
848
|
+
# In non-interactive mode, auto-accept the current corrections
|
|
849
|
+
if self.config.non_interactive:
|
|
850
|
+
self.logger.info("Non-interactive mode: Auto-accepting current corrections")
|
|
851
|
+
try:
|
|
852
|
+
# Get current review data
|
|
853
|
+
review_data = self.client.get_review_data(job_id)
|
|
854
|
+
self.logger.info("Retrieved current correction data")
|
|
855
|
+
|
|
856
|
+
# Submit as-is to complete the review
|
|
857
|
+
result = self.client.complete_review(job_id, review_data)
|
|
858
|
+
if result.get('status') == 'success':
|
|
859
|
+
self.logger.info("Review auto-completed successfully")
|
|
860
|
+
return
|
|
861
|
+
else:
|
|
862
|
+
self.logger.error(f"Failed to auto-complete review: {result}")
|
|
863
|
+
# In non-interactive mode, raise exception instead of falling back to manual
|
|
864
|
+
raise RuntimeError(f"Failed to auto-complete review: {result}")
|
|
865
|
+
except Exception as e:
|
|
866
|
+
self.logger.error(f"Error auto-completing review: {e}")
|
|
867
|
+
# In non-interactive mode, we can't fall back to manual - raise the error
|
|
868
|
+
raise RuntimeError(f"Non-interactive review failed: {e}")
|
|
869
|
+
|
|
870
|
+
# Interactive mode - open browser and wait
|
|
871
|
+
self.logger.info("The transcription is ready for review.")
|
|
872
|
+
self.logger.info("Please review and correct the lyrics in the browser.")
|
|
873
|
+
|
|
874
|
+
self.open_review_ui(job_id)
|
|
875
|
+
|
|
876
|
+
self.logger.info(f"Waiting for review completion (polling every {self.config.poll_interval}s)...")
|
|
877
|
+
|
|
878
|
+
# Poll until status changes from review states
|
|
879
|
+
while True:
|
|
880
|
+
try:
|
|
881
|
+
job_data = self.client.get_job(job_id)
|
|
882
|
+
current_status = job_data.get('status', 'unknown')
|
|
883
|
+
|
|
884
|
+
if current_status in ['awaiting_review', 'in_review']:
|
|
885
|
+
time.sleep(self.config.poll_interval)
|
|
886
|
+
else:
|
|
887
|
+
self.logger.info(f"Review completed, status: {current_status}")
|
|
888
|
+
return
|
|
889
|
+
except Exception as e:
|
|
890
|
+
self.logger.warning(f"Error checking review status: {e}")
|
|
891
|
+
time.sleep(self.config.poll_interval)
|
|
892
|
+
|
|
893
|
+
    def handle_instrumental_selection(self, job_id: str) -> None:
        """Handle instrumental selection interaction."""
        self.logger.info("=" * 60)
        self.logger.info("INSTRUMENTAL SELECTION NEEDED")
        self.logger.info("=" * 60)

        # In non-interactive mode, auto-select clean instrumental
        if self.config.non_interactive:
            self.logger.info("Non-interactive mode: Auto-selecting clean instrumental")
            selection = 'clean'
        else:
            self.logger.info("")
            self.logger.info("Choose which instrumental track to use for the final video:")
            self.logger.info("")
            self.logger.info(" 1) Clean Instrumental (no backing vocals)")
            self.logger.info(" Best for songs where you want ONLY the lead vocal removed")
            self.logger.info("")
            self.logger.info(" 2) Instrumental with Backing Vocals")
            self.logger.info(" Best for songs where backing vocals add to the karaoke experience")
            self.logger.info("")

            selection = ""
            while not selection:
                try:
                    choice = input("Enter your choice (1 or 2): ").strip()
                    if choice == '1':
                        selection = 'clean'
                    elif choice == '2':
                        selection = 'with_backing'
                    else:
                        self.logger.error("Invalid choice. Please enter 1 or 2.")
                except KeyboardInterrupt:
                    print()
                    raise

        self.logger.info(f"Submitting selection: {selection}")

        try:
            result = self.client.select_instrumental(job_id, selection)
            if result.get('status') == 'success':
                self.logger.info(f"Selection submitted successfully: {selection}")
            else:
                self.logger.error(f"Error submitting selection: {result}")
        except Exception as e:
            self.logger.error(f"Error submitting selection: {e}")

    def download_outputs(self, job_id: str, job_data: Dict[str, Any]) -> None:
        """
        Download all output files for a completed job.

        Downloads all files to match local CLI output structure:
        - Final videos (4 formats)
        - CDG/TXT ZIP packages (and extracts individual files)
        - Lyrics files (.ass, .lrc, .txt)
        - Audio stems with descriptive names
        - Title/End screen files (.mov, .jpg, .png)
        - With Vocals intermediate video
        """
        artist = job_data.get('artist', 'Unknown')
        title = job_data.get('title', 'Unknown')
        brand_code = job_data.get('state_data', {}).get('brand_code')

        # Use brand code in folder name if available
        if brand_code:
            folder_name = f"{brand_code} - {artist} - {title}"
        else:
            folder_name = f"{artist} - {title}"

        # Sanitize folder name
        folder_name = "".join(c for c in folder_name if c.isalnum() or c in " -_").strip()

        output_dir = Path(self.config.output_dir) / folder_name
        output_dir.mkdir(parents=True, exist_ok=True)

        self.logger.info(f"Downloading output files to: {output_dir}")

        # Get signed download URLs from the API
        try:
            download_data = self.client.get_download_urls(job_id)
            download_urls = download_data.get('download_urls', {})
        except Exception as e:
            self.logger.warning(f"Could not get signed download URLs: {e}")
            self.logger.warning("Falling back to gsutil (requires gcloud auth)")
            download_urls = {}

        file_urls = job_data.get('file_urls', {})
        base_name = f"{artist} - {title}"

        def download_file(category: str, key: str, local_path: Path, filename: str) -> bool:
            """Helper to download a file using signed URL or gsutil fallback."""
            # Try signed URL first
            signed_url = download_urls.get(category, {}).get(key)
            if signed_url:
                if self.client.download_file_via_url(signed_url, str(local_path)):
                    return True

            # Fall back to gsutil
            gcs_path = file_urls.get(category, {}).get(key)
            if gcs_path:
                return self.client.download_file_via_gsutil(gcs_path, str(local_path))
            return False

        # Download final videos
        finals = file_urls.get('finals', {})
        if finals:
            self.logger.info("Downloading final videos...")
            for key, blob_path in finals.items():
                if blob_path:
                    # Use descriptive filename
                    if 'lossless_4k_mp4' in key:
                        filename = f"{base_name} (Final Karaoke Lossless 4k).mp4"
                    elif 'lossless_4k_mkv' in key:
                        filename = f"{base_name} (Final Karaoke Lossless 4k).mkv"
                    elif 'lossy_4k' in key:
                        filename = f"{base_name} (Final Karaoke Lossy 4k).mp4"
                    elif 'lossy_720p' in key:
                        filename = f"{base_name} (Final Karaoke Lossy 720p).mp4"
                    else:
                        filename = Path(blob_path).name

                    local_path = output_dir / filename
                    self.logger.info(f" Downloading {filename}...")
                    if download_file('finals', key, local_path, filename):
                        self.logger.info(f" OK: {local_path}")
                    else:
                        self.logger.warning(f" FAILED: {filename}")

        # Download CDG/TXT packages
        packages = file_urls.get('packages', {})
        if packages:
            self.logger.info("Downloading karaoke packages...")
            for key, blob_path in packages.items():
                if blob_path:
                    if 'cdg' in key.lower():
                        filename = f"{base_name} (Final Karaoke CDG).zip"
                    elif 'txt' in key.lower():
                        filename = f"{base_name} (Final Karaoke TXT).zip"
                    else:
                        filename = Path(blob_path).name

                    local_path = output_dir / filename
                    self.logger.info(f" Downloading {filename}...")
                    if download_file('packages', key, local_path, filename):
                        self.logger.info(f" OK: {local_path}")

                        # Extract CDG files to match local CLI (individual .cdg and .mp3 at root)
                        if 'cdg' in key.lower():
                            self._extract_cdg_files(local_path, output_dir, base_name)
                    else:
                        self.logger.warning(f" FAILED: {filename}")

        # Download lyrics files
        lyrics = file_urls.get('lyrics', {})
        if lyrics:
            self.logger.info("Downloading lyrics files...")
            for key in ['ass', 'lrc', 'corrected_txt']:
                blob_path = lyrics.get(key)
                if blob_path:
                    ext = Path(blob_path).suffix
                    filename = f"{base_name} (Karaoke){ext}"
                    local_path = output_dir / filename
                    self.logger.info(f" Downloading {filename}...")
                    if download_file('lyrics', key, local_path, filename):
                        self.logger.info(f" OK: {local_path}")
                    else:
                        self.logger.warning(f" FAILED: {filename}")

        # Download title/end screen files (video + images)
        screens = file_urls.get('screens', {})
        if screens:
            self.logger.info("Downloading title/end screens...")
            screen_mappings = {
                'title': f"{base_name} (Title).mov",
                'title_jpg': f"{base_name} (Title).jpg",
                'title_png': f"{base_name} (Title).png",
                'end': f"{base_name} (End).mov",
                'end_jpg': f"{base_name} (End).jpg",
                'end_png': f"{base_name} (End).png",
            }
            for key, filename in screen_mappings.items():
                blob_path = screens.get(key)
                if blob_path:
                    local_path = output_dir / filename
                    self.logger.info(f" Downloading {filename}...")
                    if download_file('screens', key, local_path, filename):
                        self.logger.info(f" OK: {local_path}")
                    else:
                        self.logger.warning(f" FAILED: {filename}")

        # Download with_vocals intermediate video
        videos = file_urls.get('videos', {})
        if videos:
            self.logger.info("Downloading intermediate videos...")
            if videos.get('with_vocals'):
                filename = f"{base_name} (With Vocals).mkv"
                local_path = output_dir / filename
                self.logger.info(f" Downloading {filename}...")
                if download_file('videos', 'with_vocals', local_path, filename):
                    self.logger.info(f" OK: {local_path}")
                else:
                    self.logger.warning(f" FAILED: {filename}")

        # Download stems with descriptive names
        stems = file_urls.get('stems', {})
        if stems:
            stems_dir = output_dir / 'stems'
            stems_dir.mkdir(exist_ok=True)
            self.logger.info("Downloading audio stems...")

            # Map backend stem names to local CLI naming convention
            stem_name_mappings = {
                'instrumental_clean': f"{base_name} (Instrumental model_bs_roformer_ep_317_sdr_12.9755.ckpt).flac",
                'instrumental_with_backing': f"{base_name} (Instrumental +BV mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt).flac",
                'vocals_clean': f"{base_name} (Vocals model_bs_roformer_ep_317_sdr_12.9755.ckpt).flac",
                'lead_vocals': f"{base_name} (Lead Vocals mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt).flac",
                'backing_vocals': f"{base_name} (Backing Vocals mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt).flac",
                'bass': f"{base_name} (Bass htdemucs_6s.yaml).flac",
                'drums': f"{base_name} (Drums htdemucs_6s.yaml).flac",
                'guitar': f"{base_name} (Guitar htdemucs_6s.yaml).flac",
                'piano': f"{base_name} (Piano htdemucs_6s.yaml).flac",
                'other': f"{base_name} (Other htdemucs_6s.yaml).flac",
                'vocals': f"{base_name} (Vocals htdemucs_6s.yaml).flac",
            }

            for key, blob_path in stems.items():
                if blob_path:
                    # Use descriptive filename if available, otherwise use GCS filename
                    filename = stem_name_mappings.get(key, Path(blob_path).name)
                    local_path = stems_dir / filename
                    self.logger.info(f" Downloading {filename}...")
                    if download_file('stems', key, local_path, filename):
                        self.logger.info(f" OK: {local_path}")
                    else:
                        self.logger.warning(f" FAILED: {filename}")

            # Also copy instrumental files to root directory (matching local CLI)
            for src_key, dest_suffix in [
                ('instrumental_clean', 'Instrumental model_bs_roformer_ep_317_sdr_12.9755.ckpt'),
                ('instrumental_with_backing', 'Instrumental +BV mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt'),
            ]:
                if stems.get(src_key):
                    stem_file = stems_dir / stem_name_mappings.get(src_key, '')
                    if stem_file.exists():
                        dest_file = output_dir / f"{base_name} ({dest_suffix}).flac"
                        try:
                            import shutil
                            shutil.copy2(stem_file, dest_file)
                            self.logger.info(f" Copied to root: {dest_file.name}")
                        except Exception as e:
                            self.logger.warning(f" Failed to copy {dest_file.name}: {e}")

        self.logger.info("")
        self.logger.info(f"All files downloaded to: {output_dir}")

        # Show summary
        state_data = job_data.get('state_data', {})
        if brand_code:
            self.logger.info(f"Brand Code: {brand_code}")

        youtube_url = state_data.get('youtube_url')
        if youtube_url:
            self.logger.info(f"YouTube URL: {youtube_url}")

        # List downloaded files with sizes
        self.logger.info("")
        self.logger.info("Downloaded files:")
        total_size = 0
        for file_path in sorted(output_dir.rglob('*')):
            if file_path.is_file():
                size = file_path.stat().st_size
                total_size += size
                if size > 1024 * 1024:
                    size_str = f"{size / (1024 * 1024):.1f} MB"
                elif size > 1024:
                    size_str = f"{size / 1024:.1f} KB"
                else:
                    size_str = f"{size} B"
                rel_path = file_path.relative_to(output_dir)
                self.logger.info(f" {rel_path} ({size_str})")

        if total_size > 1024 * 1024 * 1024:
            total_str = f"{total_size / (1024 * 1024 * 1024):.2f} GB"
        elif total_size > 1024 * 1024:
            total_str = f"{total_size / (1024 * 1024):.1f} MB"
        else:
            total_str = f"{total_size / 1024:.1f} KB"
        self.logger.info(f"Total: {total_str}")

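    # NOTE (illustrative assumption, not from the original source): download_outputs()
    # expects job_data['file_urls'] to be a nested mapping of category -> key -> GCS blob
    # path, mirroring the categories used above, e.g.:
    #   {
    #       "finals":   {"lossy_720p": "<blob path>", ...},
    #       "packages": {"cdg": "<blob path>", "txt": "<blob path>"},
    #       "lyrics":   {"ass": "...", "lrc": "...", "corrected_txt": "..."},
    #       "screens":  {"title": "...", "end_png": "..."},
    #       "videos":   {"with_vocals": "..."},
    #       "stems":    {"instrumental_clean": "...", "lead_vocals": "..."},
    #   }
    # The signed URLs returned by get_download_urls() follow the same category/key layout,
    # which is why the download_file() helper can try either source with the same arguments.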
    def _extract_cdg_files(self, zip_path: Path, output_dir: Path, base_name: str) -> None:
        """
        Extract individual .cdg and .mp3 files from CDG ZIP to match local CLI output.

        Local CLI produces both:
        - Artist - Title (Final Karaoke CDG).zip (containing .cdg + .mp3)
        - Artist - Title (Karaoke).cdg (individual file at root)
        - Artist - Title (Karaoke).mp3 (individual file at root)

        Args:
            zip_path: Path to the CDG ZIP file
            output_dir: Output directory for extracted files
            base_name: Base name for output files (Artist - Title)
        """
        import zipfile

        try:
            with zipfile.ZipFile(zip_path, 'r') as zf:
                for member in zf.namelist():
                    ext = Path(member).suffix.lower()
                    if ext in ['.cdg', '.mp3']:
                        # Extract with correct naming
                        filename = f"{base_name} (Karaoke){ext}"
                        extract_path = output_dir / filename

                        # Read from zip and write to destination
                        with zf.open(member) as src:
                            with open(extract_path, 'wb') as dst:
                                dst.write(src.read())

                        self.logger.info(f" Extracted: {filename}")
        except Exception as e:
            self.logger.warning(f" Failed to extract CDG files: {e}")

    def log_timeline_updates(self, job_data: Dict[str, Any]) -> None:
        """Log any new timeline events."""
        timeline = job_data.get('timeline', [])

        # Log any new events since last check
        for i, event in enumerate(timeline):
            if i >= self._last_timeline_index:
                timestamp = event.get('timestamp', '')
                status = event.get('status', '')
                message = event.get('message', '')
                progress = event.get('progress', '')

                # Format timestamp if present
                if timestamp:
                    # Truncate to just time portion if it's a full ISO timestamp
                    if 'T' in timestamp:
                        timestamp = timestamp.split('T')[1][:8]

                log_parts = []
                if timestamp:
                    log_parts.append(f"[{timestamp}]")
                if status:
                    log_parts.append(f"[{status}]")
                if progress:
                    log_parts.append(f"[{progress}%]")
                if message:
                    log_parts.append(message)

                if log_parts:
                    self.logger.info(" ".join(log_parts))

        self._last_timeline_index = len(timeline)

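    # NOTE (illustrative assumption, not from the original source): a timeline entry is
    # expected to carry the keys read above; a hypothetical event such as
    #   {"timestamp": "2024-01-01T12:34:56Z", "status": "rendering",
    #    "progress": 42, "message": "Rendering lyric video"}
    # would be logged as:  [12:34:56] [rendering] [42%] Rendering lyric video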
    def log_worker_logs(self, job_id: str) -> None:
        """Fetch and display any new worker logs."""
        if not self._show_worker_logs:
            return

        try:
            result = self.client.get_worker_logs(job_id, since_index=self._last_log_index)
            logs = result.get('logs', [])

            for log_entry in logs:
                timestamp = log_entry.get('timestamp', '')
                level = log_entry.get('level', 'INFO')
                worker = log_entry.get('worker', 'worker')
                message = log_entry.get('message', '')

                # Format timestamp (just time portion)
                if timestamp and 'T' in timestamp:
                    timestamp = timestamp.split('T')[1][:8]

                # Color-code by level (using ANSI escape codes for terminal)
                if level == 'ERROR':
                    level_prefix = f"\033[91m{level}\033[0m"  # Red
                elif level == 'WARNING':
                    level_prefix = f"\033[93m{level}\033[0m"  # Yellow
                else:
                    level_prefix = level

                # Format: [HH:MM:SS] [worker:level] message
                log_line = f" [{timestamp}] [{worker}:{level_prefix}] {message}"

                # Use appropriate log level
                if level == 'ERROR':
                    self.logger.error(log_line)
                elif level == 'WARNING':
                    self.logger.warning(log_line)
                else:
                    self.logger.info(log_line)

            # Update index for next poll
            self._last_log_index = result.get('next_index', self._last_log_index)

        except Exception as e:
            # Log the error but don't fail
            self.logger.debug(f"Error fetching worker logs: {e}")

    def monitor(self, job_id: str) -> int:
        """Monitor job progress until completion."""
        last_status = ""

        self.logger.info(f"Monitoring job: {job_id}")
        self.logger.info(f"Service URL: {self.config.service_url}")
        self.logger.info(f"Polling every {self.config.poll_interval} seconds...")
        self.logger.info("")

        while True:
            try:
                job_data = self.client.get_job(job_id)

                status = job_data.get('status', 'unknown')
                artist = job_data.get('artist', '')
                title = job_data.get('title', '')

                # Track whether we got any new updates this poll
                had_updates = False
                prev_timeline_index = self._last_timeline_index
                prev_log_index = self._last_log_index

                # Log timeline updates (shows status changes and progress)
                self.log_timeline_updates(job_data)
                if self._last_timeline_index > prev_timeline_index:
                    had_updates = True

                # Log worker logs (shows detailed worker output for debugging)
                self.log_worker_logs(job_id)
                if self._last_log_index > prev_log_index:
                    had_updates = True

                # Log status changes with user-friendly descriptions
                if status != last_status:
                    description = self._get_status_description(status)
                    if last_status:
                        self.logger.info(f"Status: {status} - {description}")
                    else:
                        self.logger.info(f"Current status: {status} - {description}")
                    last_status = status
                    had_updates = True

                # Heartbeat: if no updates for a while, show we're still alive
                if had_updates:
                    self._polls_without_updates = 0
                else:
                    self._polls_without_updates += 1
                    if self._polls_without_updates >= self._heartbeat_interval:
                        description = self._get_status_description(status)
                        self.logger.info(f" [Still processing: {description}]")
                        self._polls_without_updates = 0

                # Handle human interaction points
                if status in ['awaiting_review', 'in_review']:
                    if not self._review_opened:
                        self.logger.info("")
                        self.handle_review(job_id)
                        self._review_opened = True
                        self._last_timeline_index = 0  # Reset to catch any events during review
                        # Refresh auth token after potentially long review
                        self.client.refresh_auth()

                elif status == 'awaiting_instrumental_selection':
                    if not self._instrumental_prompted:
                        self.logger.info("")
                        self.handle_instrumental_selection(job_id)
                        self._instrumental_prompted = True

                elif status == 'complete':
                    self.logger.info("")
                    self.logger.info("=" * 60)
                    self.logger.info("JOB COMPLETE!")
                    self.logger.info("=" * 60)
                    self.logger.info(f"Track: {artist} - {title}")
                    self.logger.info("")
                    self.download_outputs(job_id, job_data)
                    return 0

                elif status in ['failed', 'error']:
                    self.logger.info("")
                    self.logger.error("=" * 60)
                    self.logger.error("JOB FAILED")
                    self.logger.error("=" * 60)
                    error_message = job_data.get('error_message', 'Unknown error')
                    self.logger.error(f"Error: {error_message}")
                    error_details = job_data.get('error_details')
                    if error_details:
                        self.logger.error(f"Details: {json.dumps(error_details, indent=2)}")
                    return 1

                elif status == 'cancelled':
                    self.logger.info("")
                    self.logger.warning("Job was cancelled")
                    return 1

                time.sleep(self.config.poll_interval)

            except KeyboardInterrupt:
                self.logger.info("")
                self.logger.warning(f"Monitoring interrupted. Job ID: {job_id}")
                self.logger.info(f"Resume with: karaoke-gen-remote --resume {job_id}")
                return 130
            except Exception as e:
                self.logger.warning(f"Error polling job status: {e}")
                time.sleep(self.config.poll_interval)

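# NOTE (summary comment added for clarity; not in the original source): monitor() is a
# status-driven loop over the job record returned by get_job(): 'awaiting_review' /
# 'in_review' trigger the lyrics review handler once, 'awaiting_instrumental_selection'
# triggers the instrumental prompt once, 'complete' downloads all outputs and returns 0,
# 'failed' / 'error' and 'cancelled' return 1, and every other status simply sleeps for
# poll_interval seconds and re-polls.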
def check_prerequisites(logger: logging.Logger) -> bool:
    """Check that required tools are available."""
    # Check for gcloud
    try:
        subprocess.run(['gcloud', '--version'], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        logger.warning("gcloud CLI not found. Authentication may be limited.")

    # Check for gsutil
    try:
        subprocess.run(['gsutil', 'version'], capture_output=True, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        logger.warning("gsutil not found. File downloads may fail. Install with: pip install gsutil")

    return True

def get_auth_token(logger: logging.Logger) -> Optional[str]:
    """Get authentication token from environment or gcloud."""
    # Check environment variable first
    token = os.environ.get('KARAOKE_GEN_AUTH_TOKEN')
    if token:
        return token

    # Try gcloud
    try:
        result = subprocess.run(
            ['gcloud', 'auth', 'print-identity-token'],
            capture_output=True,
            text=True,
            check=True
        )
        return result.stdout.strip()
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None

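# NOTE (usage sketch, not part of the original source): authentication is resolved by
# get_auth_token() in two steps - the KARAOKE_GEN_AUTH_TOKEN environment variable first,
# then `gcloud auth print-identity-token` as a fallback - so either of the following
# (with placeholder values) would satisfy it before running the CLI:
#   export KARAOKE_GEN_AUTH_TOKEN="<identity-token>"
#   gcloud auth print-identity-token   # requires an active gcloud login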
def main():
    """Main entry point for the remote CLI."""
    # Set up logging - same format as gen_cli.py
    logger = logging.getLogger(__name__)
    log_handler = logging.StreamHandler()
    log_formatter = logging.Formatter(
        fmt="%(asctime)s.%(msecs)03d - %(levelname)s - %(module)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S"
    )
    log_handler.setFormatter(log_formatter)
    logger.addHandler(log_handler)

    # Use shared CLI parser
    parser = create_parser(prog="karaoke-gen-remote")
    args = parser.parse_args()

    # Set log level
    log_level = getattr(logging, args.log_level.upper())
    logger.setLevel(log_level)

    # Check for KARAOKE_GEN_URL - this is REQUIRED for remote mode
    if not args.service_url:
        logger.error("KARAOKE_GEN_URL environment variable is required for karaoke-gen-remote")
        logger.error("")
        logger.error("Please set it to your cloud backend URL:")
        logger.error(" export KARAOKE_GEN_URL=https://your-backend.run.app")
        logger.error("")
        logger.error("Or pass it via command line:")
        logger.error(" karaoke-gen-remote --service-url https://your-backend.run.app ...")
        return 1

    # Check prerequisites
    check_prerequisites(logger)

    # Get auth token from environment variable
    auth_token = get_auth_token(logger)

    # Create config
    config = Config(
        service_url=args.service_url.rstrip('/'),
        review_ui_url=args.review_ui_url.rstrip('/'),
        poll_interval=args.poll_interval,
        output_dir=args.output_dir,
        auth_token=auth_token,
        non_interactive=getattr(args, 'yes', False),  # -y / --yes flag
        # Job tracking metadata
        environment=getattr(args, 'environment', ''),
        client_id=getattr(args, 'client_id', ''),
    )

    # Create client
    client = RemoteKaraokeClient(config, logger)
    monitor = JobMonitor(client, config, logger)

    # Handle resume mode
    if args.resume:
        logger.info("=" * 60)
        logger.info("Karaoke Generator (Remote) - Resume Job")
        logger.info("=" * 60)
        logger.info(f"Job ID: {args.resume}")

        try:
            # Verify job exists
            job_data = client.get_job(args.resume)
            artist = job_data.get('artist', 'Unknown')
            title = job_data.get('title', 'Unknown')
            status = job_data.get('status', 'unknown')

            logger.info(f"Artist: {artist}")
            logger.info(f"Title: {title}")
            logger.info(f"Current status: {status}")
            logger.info("")

            return monitor.monitor(args.resume)
        except ValueError as e:
            logger.error(str(e))
            return 1
        except Exception as e:
            logger.error(f"Error resuming job: {e}")
            return 1

    # Handle bulk delete mode
    if getattr(args, 'bulk_delete', False):
        filter_env = getattr(args, 'filter_environment', None)
        filter_client = getattr(args, 'filter_client_id', None)

        if not filter_env and not filter_client:
            logger.error("Bulk delete requires at least one filter: --filter-environment or --filter-client-id")
            return 1

        logger.info("=" * 60)
        logger.info("Karaoke Generator (Remote) - Bulk Delete Jobs")
        logger.info("=" * 60)
        if filter_env:
            logger.info(f"Environment filter: {filter_env}")
        if filter_client:
            logger.info(f"Client ID filter: {filter_client}")
        logger.info("")

        try:
            # First get preview
            result = client.bulk_delete_jobs(
                environment=filter_env,
                client_id=filter_client,
                confirm=False
            )

            jobs_to_delete = result.get('jobs_to_delete', 0)
            sample_jobs = result.get('sample_jobs', [])

            if jobs_to_delete == 0:
                logger.info("No jobs match the specified filters.")
                return 0

            logger.info(f"Found {jobs_to_delete} jobs matching filters:")
            logger.info("")

            # Show sample
            for job in sample_jobs:
                logger.info(f" {job.get('job_id', 'unknown')[:10]}: {job.get('artist', 'Unknown')} - {job.get('title', 'Unknown')} ({job.get('status', 'unknown')})")

            if len(sample_jobs) < jobs_to_delete:
                logger.info(f" ... and {jobs_to_delete - len(sample_jobs)} more")

            logger.info("")

            # Confirm unless -y flag is set
            if not config.non_interactive:
                confirm = input(f"Are you sure you want to delete {jobs_to_delete} jobs and all their files? [y/N]: ")
                if confirm.lower() != 'y':
                    logger.info("Bulk deletion cancelled.")
                    return 0

            # Execute deletion
            result = client.bulk_delete_jobs(
                environment=filter_env,
                client_id=filter_client,
                confirm=True
            )

            logger.info(f"✓ Deleted {result.get('jobs_deleted', 0)} jobs")
            if result.get('files_deleted'):
                logger.info(f"✓ Cleaned up files from {result.get('files_deleted', 0)} jobs")
            return 0

        except Exception as e:
            logger.error(f"Error bulk deleting jobs: {e}")
            return 1

    # Handle list jobs mode
    if getattr(args, 'list_jobs', False):
        filter_env = getattr(args, 'filter_environment', None)
        filter_client = getattr(args, 'filter_client_id', None)

        logger.info("=" * 60)
        logger.info("Karaoke Generator (Remote) - List Jobs")
        logger.info("=" * 60)
        if filter_env:
            logger.info(f"Environment filter: {filter_env}")
        if filter_client:
            logger.info(f"Client ID filter: {filter_client}")
        logger.info("")

        try:
            jobs = client.list_jobs(
                environment=filter_env,
                client_id=filter_client,
                limit=100
            )

            if not jobs:
                logger.info("No jobs found.")
                return 0

            # Print header - include environment/client if available
            logger.info(f"{'JOB ID':<12} {'STATUS':<25} {'ENV':<8} {'ARTIST':<18} {'TITLE':<25}")
            logger.info("-" * 92)

            # Print each job
            for job in jobs:
                # Use 'or' to handle None values (not just missing keys)
                job_id = (job.get('job_id') or 'unknown')[:10]
                status = (job.get('status') or 'unknown')[:23]
                artist = (job.get('artist') or 'Unknown')[:16]
                title = (job.get('title') or 'Unknown')[:23]
                # Get environment from request_metadata
                req_metadata = job.get('request_metadata') or {}
                env = (req_metadata.get('environment') or '-')[:6]
                logger.info(f"{job_id:<12} {status:<25} {env:<8} {artist:<18} {title:<25}")

            logger.info("")
            logger.info(f"Total: {len(jobs)} jobs")
            logger.info("")
            logger.info("To retry a failed job: karaoke-gen-remote --retry <JOB_ID>")
            logger.info("To delete a job: karaoke-gen-remote --delete <JOB_ID>")
            logger.info("To bulk delete: karaoke-gen-remote --bulk-delete --filter-environment=test")
            logger.info("To cancel a job: karaoke-gen-remote --cancel <JOB_ID>")
            return 0

        except Exception as e:
            logger.error(f"Error listing jobs: {e}")
            return 1

    # Handle cancel job mode
    if args.cancel:
        logger.info("=" * 60)
        logger.info("Karaoke Generator (Remote) - Cancel Job")
        logger.info("=" * 60)
        logger.info(f"Job ID: {args.cancel}")

        try:
            # Get job info first
            job_data = client.get_job(args.cancel)
            artist = job_data.get('artist', 'Unknown')
            title = job_data.get('title', 'Unknown')
            status = job_data.get('status', 'unknown')

            logger.info(f"Artist: {artist}")
            logger.info(f"Title: {title}")
            logger.info(f"Current status: {status}")
            logger.info("")

            # Cancel the job
            result = client.cancel_job(args.cancel)
            logger.info(f"✓ Job cancelled successfully")
            return 0

        except ValueError as e:
            logger.error(str(e))
            return 1
        except RuntimeError as e:
            logger.error(str(e))
            return 1
        except Exception as e:
            logger.error(f"Error cancelling job: {e}")
            return 1

    # Handle retry job mode
    if args.retry:
        logger.info("=" * 60)
        logger.info("Karaoke Generator (Remote) - Retry Failed Job")
        logger.info("=" * 60)
        logger.info(f"Job ID: {args.retry}")

        try:
            # Get job info first
            job_data = client.get_job(args.retry)
            artist = job_data.get('artist', 'Unknown')
            title = job_data.get('title', 'Unknown')
            status = job_data.get('status', 'unknown')
            error_message = job_data.get('error_message', 'No error message')

            logger.info(f"Artist: {artist}")
            logger.info(f"Title: {title}")
            logger.info(f"Current status: {status}")
            if status == 'failed':
                logger.info(f"Error: {error_message}")
            logger.info("")

            if status != 'failed':
                logger.error(f"Only failed jobs can be retried (current status: {status})")
                return 1

            # Retry the job
            result = client.retry_job(args.retry)
            retry_stage = result.get('retry_stage', 'unknown')
            logger.info(f"✓ Job retry started from stage: {retry_stage}")
            logger.info("")
            logger.info(f"Monitoring job progress...")
            logger.info("")

            # Monitor the retried job
            return monitor.monitor(args.retry)

        except ValueError as e:
            logger.error(str(e))
            return 1
        except RuntimeError as e:
            logger.error(str(e))
            return 1
        except Exception as e:
            logger.error(f"Error retrying job: {e}")
            return 1

    # Handle delete job mode
    if args.delete:
        logger.info("=" * 60)
        logger.info("Karaoke Generator (Remote) - Delete Job")
        logger.info("=" * 60)
        logger.info(f"Job ID: {args.delete}")

        try:
            # Get job info first
            job_data = client.get_job(args.delete)
            artist = job_data.get('artist', 'Unknown')
            title = job_data.get('title', 'Unknown')
            status = job_data.get('status', 'unknown')

            logger.info(f"Artist: {artist}")
            logger.info(f"Title: {title}")
            logger.info(f"Status: {status}")
            logger.info("")

            # Confirm deletion unless -y flag is set
            if not config.non_interactive:
                confirm = input("Are you sure you want to delete this job and all its files? [y/N]: ")
                if confirm.lower() != 'y':
                    logger.info("Deletion cancelled.")
                    return 0

            # Delete the job
            result = client.delete_job(args.delete, delete_files=True)
            logger.info(f"✓ Job deleted successfully (including all files)")
            return 0

        except ValueError as e:
            logger.error(str(e))
            return 1
        except Exception as e:
            logger.error(f"Error deleting job: {e}")
            return 1

    # Warn about unsupported features
    if args.finalise_only:
        logger.error("--finalise-only is not supported in remote mode")
        return 1

    if args.edit_lyrics:
        logger.error("--edit-lyrics is not yet supported in remote mode")
        return 1

    if args.test_email_template:
        logger.error("--test_email_template is not supported in remote mode")
        return 1

    # Warn about features that are not yet supported in remote mode
    ignored_features = []
    if args.prep_only:
        ignored_features.append("--prep-only")
    if args.skip_separation:
        ignored_features.append("--skip-separation")
    if args.skip_transcription:
        ignored_features.append("--skip-transcription")
    if args.lyrics_only:
        ignored_features.append("--lyrics-only")
    if args.background_video:
        ignored_features.append("--background_video")
    if getattr(args, 'auto_download', False):
        ignored_features.append("--auto-download (audio search not yet supported)")
    # These are now supported but server-side handling may be partial
    if args.organised_dir:
        ignored_features.append("--organised_dir (local-only)")
    # organised_dir_rclone_root is now supported in remote mode
    if args.public_share_dir:
        ignored_features.append("--public_share_dir (local-only)")
    if args.youtube_client_secrets_file:
        ignored_features.append("--youtube_client_secrets_file (not yet implemented)")
    if args.rclone_destination:
        ignored_features.append("--rclone_destination (local-only)")
    if args.email_template_file:
        ignored_features.append("--email_template_file (not yet implemented)")

    if ignored_features:
        logger.warning(f"The following options are not yet supported in remote mode and will be ignored:")
        for feature in ignored_features:
            logger.warning(f" - {feature}")

    # Handle new job submission - parse input arguments same as gen_cli
    input_media, artist, title, filename_pattern = None, None, None, None

    if not args.args:
        parser.print_help()
        return 1

    # Allow 3 forms of positional arguments:
    # 1. URL or Media File only
    # 2. Artist and Title only
    # 3. URL, Artist, and Title
    if args.args and (is_url(args.args[0]) or is_file(args.args[0])):
        input_media = args.args[0]
        if len(args.args) > 2:
            artist = args.args[1]
            title = args.args[2]
        elif len(args.args) > 1:
            artist = args.args[1]
        else:
            logger.error("Input media provided without Artist and Title")
            return 1
    elif os.path.isdir(args.args[0]):
        logger.error("Folder processing is not yet supported in remote mode")
        return 1
    elif len(args.args) > 1:
        artist = args.args[0]
        title = args.args[1]
        logger.error("Audio search (artist+title) is not yet supported in remote mode.")
        logger.error("Please provide a local audio file path instead.")
        logger.error("")
        logger.error("For local flacfetch search, use karaoke-gen instead:")
        logger.error(f" karaoke-gen \"{artist}\" \"{title}\"")
        return 1
    else:
        parser.print_help()
        return 1

    # For now, remote mode only supports file uploads
    if not input_media or not os.path.isfile(input_media):
        logger.error("Remote mode currently only supports local file uploads")
        logger.error("Please provide a path to an audio file (mp3, wav, flac, m4a, ogg, aac)")
        return 1

    # Validate artist and title are provided
    if not artist or not title:
        logger.error("Artist and Title are required")
        parser.print_help()
        return 1

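    # NOTE (illustrative examples, not in the original source; the file name is a
    # placeholder): the accepted positional forms above correspond to invocations such as
    #   karaoke-gen-remote ./song.flac "Artist" "Title"   -> accepted (local file + artist + title)
    #   karaoke-gen-remote "Artist" "Title"               -> rejected (audio search not yet supported)
    # so remote mode ultimately requires input_media to be an existing local audio file.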
    logger.info("=" * 60)
    logger.info("Karaoke Generator (Remote) - Job Submission")
    logger.info("=" * 60)
    logger.info(f"File: {input_media}")
    logger.info(f"Artist: {artist}")
    logger.info(f"Title: {title}")
    if args.style_params_json:
        logger.info(f"Style: {args.style_params_json}")
    logger.info(f"CDG: {args.enable_cdg}, TXT: {args.enable_txt}")
    if args.brand_prefix:
        logger.info(f"Brand: {args.brand_prefix}")
    if getattr(args, 'enable_youtube_upload', False):
        logger.info(f"YouTube Upload: enabled (server-side)")
    # Native API distribution (preferred for remote CLI)
    if getattr(args, 'dropbox_path', None):
        logger.info(f"Dropbox (native): {args.dropbox_path}")
    if getattr(args, 'gdrive_folder_id', None):
        logger.info(f"Google Drive (native): {args.gdrive_folder_id}")
    # Legacy rclone distribution
    if args.organised_dir_rclone_root:
        logger.info(f"Dropbox (rclone): {args.organised_dir_rclone_root}")
    if args.discord_webhook_url:
        logger.info(f"Discord: enabled")
    # Lyrics configuration
    if getattr(args, 'lyrics_artist', None):
        logger.info(f"Lyrics Artist Override: {args.lyrics_artist}")
    if getattr(args, 'lyrics_title', None):
        logger.info(f"Lyrics Title Override: {args.lyrics_title}")
    if getattr(args, 'lyrics_file', None):
        logger.info(f"Lyrics File: {args.lyrics_file}")
    if getattr(args, 'subtitle_offset_ms', 0):
        logger.info(f"Subtitle Offset: {args.subtitle_offset_ms}ms")
    # Audio model configuration
    if getattr(args, 'clean_instrumental_model', None):
        logger.info(f"Clean Instrumental Model: {args.clean_instrumental_model}")
    if getattr(args, 'backing_vocals_models', None):
        logger.info(f"Backing Vocals Models: {args.backing_vocals_models}")
    if getattr(args, 'other_stems_models', None):
        logger.info(f"Other Stems Models: {args.other_stems_models}")
    if getattr(args, 'existing_instrumental', None):
        logger.info(f"Existing Instrumental: {args.existing_instrumental}")
    logger.info(f"Service URL: {config.service_url}")
    logger.info(f"Review UI: {config.review_ui_url}")
    if config.non_interactive:
        logger.info(f"Non-interactive mode: enabled (will auto-accept defaults)")
    logger.info("")

    # Read youtube description from file if provided
    youtube_description = None
    if args.youtube_description_file and os.path.isfile(args.youtube_description_file):
        try:
            with open(args.youtube_description_file, 'r') as f:
                youtube_description = f.read()
            logger.info(f"Loaded YouTube description from: {args.youtube_description_file}")
        except Exception as e:
            logger.warning(f"Failed to read YouTube description file: {e}")

    try:
        # Submit job with all options
        result = client.submit_job(
            filepath=input_media,
            artist=artist,
            title=title,
            style_params_path=args.style_params_json,
            enable_cdg=args.enable_cdg,
            enable_txt=args.enable_txt,
            brand_prefix=args.brand_prefix,
            discord_webhook_url=args.discord_webhook_url,
            youtube_description=youtube_description,
            organised_dir_rclone_root=args.organised_dir_rclone_root,
            enable_youtube_upload=getattr(args, 'enable_youtube_upload', False),
            # Native API distribution (preferred for remote CLI)
            dropbox_path=getattr(args, 'dropbox_path', None),
            gdrive_folder_id=getattr(args, 'gdrive_folder_id', None),
            # Lyrics configuration
            lyrics_artist=getattr(args, 'lyrics_artist', None),
            lyrics_title=getattr(args, 'lyrics_title', None),
            lyrics_file=getattr(args, 'lyrics_file', None),
            subtitle_offset_ms=getattr(args, 'subtitle_offset_ms', 0) or 0,
            # Audio separation model configuration
            clean_instrumental_model=getattr(args, 'clean_instrumental_model', None),
            backing_vocals_models=getattr(args, 'backing_vocals_models', None),
            other_stems_models=getattr(args, 'other_stems_models', None),
            # Existing instrumental (Batch 3)
            existing_instrumental=getattr(args, 'existing_instrumental', None),
        )
        job_id = result.get('job_id')
        style_assets = result.get('style_assets_uploaded', [])
        server_version = result.get('server_version', 'unknown')

        logger.info(f"Job submitted successfully: {job_id}")
        logger.info(f"Server version: {server_version}")
        if style_assets:
            logger.info(f"Style assets uploaded: {', '.join(style_assets)}")
        logger.info("")

        # Monitor job
        return monitor.monitor(job_id)

    except FileNotFoundError as e:
        logger.error(str(e))
        return 1
    except ValueError as e:
        logger.error(str(e))
        return 1
    except Exception as e:
        logger.error(f"Error: {e}")
        logger.exception("Full error details:")
        return 1


if __name__ == "__main__":
    sys.exit(main())