karaoke-gen 0.86.7__py3-none-any.whl → 0.96.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- backend/.coveragerc +20 -0
- backend/.gitignore +37 -0
- backend/Dockerfile +43 -0
- backend/Dockerfile.base +74 -0
- backend/README.md +242 -0
- backend/__init__.py +0 -0
- backend/api/__init__.py +0 -0
- backend/api/dependencies.py +457 -0
- backend/api/routes/__init__.py +0 -0
- backend/api/routes/admin.py +742 -0
- backend/api/routes/audio_search.py +903 -0
- backend/api/routes/auth.py +348 -0
- backend/api/routes/file_upload.py +2076 -0
- backend/api/routes/health.py +344 -0
- backend/api/routes/internal.py +435 -0
- backend/api/routes/jobs.py +1610 -0
- backend/api/routes/review.py +652 -0
- backend/api/routes/themes.py +162 -0
- backend/api/routes/users.py +1014 -0
- backend/config.py +172 -0
- backend/main.py +133 -0
- backend/middleware/__init__.py +5 -0
- backend/middleware/audit_logging.py +124 -0
- backend/models/__init__.py +0 -0
- backend/models/job.py +519 -0
- backend/models/requests.py +123 -0
- backend/models/theme.py +153 -0
- backend/models/user.py +254 -0
- backend/models/worker_log.py +164 -0
- backend/pyproject.toml +29 -0
- backend/quick-check.sh +93 -0
- backend/requirements.txt +29 -0
- backend/run_tests.sh +60 -0
- backend/services/__init__.py +0 -0
- backend/services/audio_analysis_service.py +243 -0
- backend/services/audio_editing_service.py +278 -0
- backend/services/audio_search_service.py +702 -0
- backend/services/auth_service.py +630 -0
- backend/services/credential_manager.py +792 -0
- backend/services/discord_service.py +172 -0
- backend/services/dropbox_service.py +301 -0
- backend/services/email_service.py +1093 -0
- backend/services/encoding_interface.py +454 -0
- backend/services/encoding_service.py +405 -0
- backend/services/firestore_service.py +512 -0
- backend/services/flacfetch_client.py +573 -0
- backend/services/gce_encoding/README.md +72 -0
- backend/services/gce_encoding/__init__.py +22 -0
- backend/services/gce_encoding/main.py +589 -0
- backend/services/gce_encoding/requirements.txt +16 -0
- backend/services/gdrive_service.py +356 -0
- backend/services/job_logging.py +258 -0
- backend/services/job_manager.py +842 -0
- backend/services/job_notification_service.py +271 -0
- backend/services/local_encoding_service.py +590 -0
- backend/services/local_preview_encoding_service.py +407 -0
- backend/services/lyrics_cache_service.py +216 -0
- backend/services/metrics.py +413 -0
- backend/services/packaging_service.py +287 -0
- backend/services/rclone_service.py +106 -0
- backend/services/storage_service.py +209 -0
- backend/services/stripe_service.py +275 -0
- backend/services/structured_logging.py +254 -0
- backend/services/template_service.py +330 -0
- backend/services/theme_service.py +469 -0
- backend/services/tracing.py +543 -0
- backend/services/user_service.py +721 -0
- backend/services/worker_service.py +558 -0
- backend/services/youtube_service.py +112 -0
- backend/services/youtube_upload_service.py +445 -0
- backend/tests/__init__.py +4 -0
- backend/tests/conftest.py +224 -0
- backend/tests/emulator/__init__.py +7 -0
- backend/tests/emulator/conftest.py +88 -0
- backend/tests/emulator/test_e2e_cli_backend.py +1053 -0
- backend/tests/emulator/test_emulator_integration.py +356 -0
- backend/tests/emulator/test_style_loading_direct.py +436 -0
- backend/tests/emulator/test_worker_logs_direct.py +229 -0
- backend/tests/emulator/test_worker_logs_subcollection.py +443 -0
- backend/tests/requirements-test.txt +10 -0
- backend/tests/requirements.txt +6 -0
- backend/tests/test_admin_email_endpoints.py +411 -0
- backend/tests/test_api_integration.py +460 -0
- backend/tests/test_api_routes.py +93 -0
- backend/tests/test_audio_analysis_service.py +294 -0
- backend/tests/test_audio_editing_service.py +386 -0
- backend/tests/test_audio_search.py +1398 -0
- backend/tests/test_audio_services.py +378 -0
- backend/tests/test_auth_firestore.py +231 -0
- backend/tests/test_config_extended.py +68 -0
- backend/tests/test_credential_manager.py +377 -0
- backend/tests/test_dependencies.py +54 -0
- backend/tests/test_discord_service.py +244 -0
- backend/tests/test_distribution_services.py +820 -0
- backend/tests/test_dropbox_service.py +472 -0
- backend/tests/test_email_service.py +492 -0
- backend/tests/test_emulator_integration.py +322 -0
- backend/tests/test_encoding_interface.py +412 -0
- backend/tests/test_file_upload.py +1739 -0
- backend/tests/test_flacfetch_client.py +632 -0
- backend/tests/test_gdrive_service.py +524 -0
- backend/tests/test_instrumental_api.py +431 -0
- backend/tests/test_internal_api.py +343 -0
- backend/tests/test_job_creation_regression.py +583 -0
- backend/tests/test_job_manager.py +339 -0
- backend/tests/test_job_manager_notifications.py +329 -0
- backend/tests/test_job_notification_service.py +443 -0
- backend/tests/test_jobs_api.py +273 -0
- backend/tests/test_local_encoding_service.py +423 -0
- backend/tests/test_local_preview_encoding_service.py +567 -0
- backend/tests/test_main.py +87 -0
- backend/tests/test_models.py +918 -0
- backend/tests/test_packaging_service.py +382 -0
- backend/tests/test_requests.py +201 -0
- backend/tests/test_routes_jobs.py +282 -0
- backend/tests/test_routes_review.py +337 -0
- backend/tests/test_services.py +556 -0
- backend/tests/test_services_extended.py +112 -0
- backend/tests/test_storage_service.py +448 -0
- backend/tests/test_style_upload.py +261 -0
- backend/tests/test_template_service.py +295 -0
- backend/tests/test_theme_service.py +516 -0
- backend/tests/test_unicode_sanitization.py +522 -0
- backend/tests/test_upload_api.py +256 -0
- backend/tests/test_validate.py +156 -0
- backend/tests/test_video_worker_orchestrator.py +847 -0
- backend/tests/test_worker_log_subcollection.py +509 -0
- backend/tests/test_worker_logging.py +365 -0
- backend/tests/test_workers.py +1116 -0
- backend/tests/test_workers_extended.py +178 -0
- backend/tests/test_youtube_service.py +247 -0
- backend/tests/test_youtube_upload_service.py +568 -0
- backend/validate.py +173 -0
- backend/version.py +27 -0
- backend/workers/README.md +597 -0
- backend/workers/__init__.py +11 -0
- backend/workers/audio_worker.py +618 -0
- backend/workers/lyrics_worker.py +683 -0
- backend/workers/render_video_worker.py +483 -0
- backend/workers/screens_worker.py +525 -0
- backend/workers/style_helper.py +198 -0
- backend/workers/video_worker.py +1277 -0
- backend/workers/video_worker_orchestrator.py +701 -0
- backend/workers/worker_logging.py +278 -0
- karaoke_gen/instrumental_review/static/index.html +7 -4
- karaoke_gen/karaoke_finalise/karaoke_finalise.py +6 -1
- karaoke_gen/style_loader.py +3 -1
- karaoke_gen/utils/__init__.py +163 -8
- karaoke_gen/video_background_processor.py +9 -4
- {karaoke_gen-0.86.7.dist-info → karaoke_gen-0.96.0.dist-info}/METADATA +2 -1
- {karaoke_gen-0.86.7.dist-info → karaoke_gen-0.96.0.dist-info}/RECORD +187 -42
- lyrics_transcriber/correction/agentic/providers/config.py +9 -5
- lyrics_transcriber/correction/agentic/providers/langchain_bridge.py +1 -51
- lyrics_transcriber/correction/corrector.py +192 -130
- lyrics_transcriber/correction/operations.py +24 -9
- lyrics_transcriber/frontend/package-lock.json +2 -2
- lyrics_transcriber/frontend/package.json +1 -1
- lyrics_transcriber/frontend/src/components/AIFeedbackModal.tsx +1 -1
- lyrics_transcriber/frontend/src/components/CorrectedWordWithActions.tsx +11 -7
- lyrics_transcriber/frontend/src/components/EditActionBar.tsx +31 -5
- lyrics_transcriber/frontend/src/components/EditModal.tsx +28 -10
- lyrics_transcriber/frontend/src/components/EditTimelineSection.tsx +123 -27
- lyrics_transcriber/frontend/src/components/EditWordList.tsx +112 -60
- lyrics_transcriber/frontend/src/components/Header.tsx +90 -76
- lyrics_transcriber/frontend/src/components/LyricsAnalyzer.tsx +53 -31
- lyrics_transcriber/frontend/src/components/LyricsSynchronizer/SyncControls.tsx +44 -13
- lyrics_transcriber/frontend/src/components/LyricsSynchronizer/TimelineCanvas.tsx +66 -50
- lyrics_transcriber/frontend/src/components/LyricsSynchronizer/index.tsx +124 -30
- lyrics_transcriber/frontend/src/components/ReferenceView.tsx +1 -1
- lyrics_transcriber/frontend/src/components/TimelineEditor.tsx +12 -5
- lyrics_transcriber/frontend/src/components/TimingOffsetModal.tsx +3 -3
- lyrics_transcriber/frontend/src/components/TranscriptionView.tsx +1 -1
- lyrics_transcriber/frontend/src/components/WordDivider.tsx +11 -7
- lyrics_transcriber/frontend/src/components/shared/components/Word.tsx +4 -2
- lyrics_transcriber/frontend/src/hooks/useManualSync.ts +103 -1
- lyrics_transcriber/frontend/src/theme.ts +42 -15
- lyrics_transcriber/frontend/tsconfig.tsbuildinfo +1 -1
- lyrics_transcriber/frontend/vite.config.js +5 -0
- lyrics_transcriber/frontend/web_assets/assets/{index-BECn1o8Q.js → index-BSMgOq4Z.js} +6959 -5782
- lyrics_transcriber/frontend/web_assets/assets/index-BSMgOq4Z.js.map +1 -0
- lyrics_transcriber/frontend/web_assets/index.html +6 -2
- lyrics_transcriber/frontend/web_assets/nomad-karaoke-logo.svg +5 -0
- lyrics_transcriber/output/generator.py +17 -3
- lyrics_transcriber/output/video.py +60 -95
- lyrics_transcriber/frontend/web_assets/assets/index-BECn1o8Q.js.map +0 -1
- {karaoke_gen-0.86.7.dist-info → karaoke_gen-0.96.0.dist-info}/WHEEL +0 -0
- {karaoke_gen-0.86.7.dist-info → karaoke_gen-0.96.0.dist-info}/entry_points.txt +0 -0
- {karaoke_gen-0.86.7.dist-info → karaoke_gen-0.96.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1277 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Video generation and finalization worker.
|
|
3
|
+
|
|
4
|
+
This worker has two code paths controlled by USE_NEW_ORCHESTRATOR:
|
|
5
|
+
|
|
6
|
+
1. NEW PATH (USE_NEW_ORCHESTRATOR=true):
|
|
7
|
+
Uses VideoWorkerOrchestrator which provides a unified pipeline for all
|
|
8
|
+
encoding backends (GCE or local). This ensures features like YouTube upload,
|
|
9
|
+
Discord notifications, and CDG/TXT packaging work regardless of encoding backend.
|
|
10
|
+
|
|
11
|
+
2. LEGACY PATH (USE_NEW_ORCHESTRATOR=false):
|
|
12
|
+
Uses KaraokeFinalise from the karaoke_gen package. This path has divergent
|
|
13
|
+
behavior between GCE and local encoding - GCE encoding bypasses many features.
|
|
14
|
+
|
|
15
|
+
The orchestrator-based approach is preferred as it:
|
|
16
|
+
- Eliminates GCE vs local code path divergence
|
|
17
|
+
- Ensures all features work consistently
|
|
18
|
+
- Provides better testability and maintainability
|
|
19
|
+
|
|
20
|
+
Observability:
|
|
21
|
+
- All operations wrapped in tracing spans for Cloud Trace visibility
|
|
22
|
+
- Logs include [job:ID] prefix for easy filtering in Cloud Logging
|
|
23
|
+
- Worker start/end timing logged with WORKER_START/WORKER_END markers
|
|
24
|
+
"""
|
|
25
|
+
import logging
|
|
26
|
+
import os
|
|
27
|
+
import shutil
|
|
28
|
+
import tempfile
|
|
29
|
+
import time
|
|
30
|
+
import json
|
|
31
|
+
from typing import Optional, Dict, Any
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
|
|
34
|
+
from backend.models.job import JobStatus
|
|
35
|
+
from backend.services.job_manager import JobManager
|
|
36
|
+
from backend.services.storage_service import StorageService
|
|
37
|
+
from backend.services.rclone_service import get_rclone_service
|
|
38
|
+
from backend.services.youtube_service import get_youtube_service
|
|
39
|
+
from backend.services.encoding_service import get_encoding_service
|
|
40
|
+
from backend.config import get_settings
|
|
41
|
+
from backend.workers.style_helper import load_style_config
|
|
42
|
+
from backend.workers.worker_logging import create_job_logger, setup_job_logging, job_logging_context
|
|
43
|
+
from backend.services.tracing import job_span, add_span_event, add_span_attribute
|
|
44
|
+
|
|
45
|
+
# Import from karaoke_gen package - reuse existing implementation
|
|
46
|
+
from karaoke_gen.karaoke_finalise.karaoke_finalise import KaraokeFinalise
|
|
47
|
+
from karaoke_gen.utils import sanitize_filename
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# Module-level logger; per-job logging goes through create_job_logger instead.
logger = logging.getLogger(__name__)


# Feature flag for new orchestrator-based pipeline
# Set to True to use the new unified pipeline that works with any encoding backend
# Set to False to use the legacy KaraokeFinalise-based pipeline
# Defaults to enabled; any env value other than "true" (case-insensitive) disables it.
USE_NEW_ORCHESTRATOR = os.environ.get("USE_NEW_ORCHESTRATOR", "true").lower() == "true"


# Loggers to capture for video worker
# Include the full module path to properly capture KaraokeFinalise logs
VIDEO_WORKER_LOGGERS = [
    "karaoke_gen.karaoke_finalise",
    "karaoke_gen.karaoke_finalise.karaoke_finalise",  # The actual logger name from __name__
]
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
async def _encode_via_gce(
    job_id: str,
    job,
    job_manager: JobManager,
    storage: StorageService,
    temp_dir: str,
    base_name: str,
    job_log,
) -> Dict[str, Any]:
    """
    Encode videos using the high-performance GCE encoding worker.

    This offloads FFmpeg encoding to a dedicated C4 GCE instance with
    Intel Granite Rapids 3.9 GHz CPU for 2-3x faster encoding.

    Args:
        job_id: Job ID
        job: Job object
        job_manager: Job manager instance
        storage: Storage service instance
        temp_dir: Temporary directory for output files
        base_name: Base name for output files (e.g., "Artist - Title")
        job_log: Job-specific logger

    Returns:
        Dict mapping result keys expected by _upload_results (e.g.
        "final_video", "final_video_720p") to local paths of the
        downloaded encoded files.

    Raises:
        Exception: Re-raises any failure from the encoding service after
            logging it to both the job log and the module log.
    """
    settings = get_settings()
    encoding_service = get_encoding_service()

    job_log.info("Using GCE encoding worker for high-performance encoding")
    logger.info(f"[job:{job_id}] Using GCE encoding worker")

    # Construct GCS paths for the encoding worker
    bucket_name = settings.gcs_bucket_name
    input_gcs_path = f"gs://{bucket_name}/jobs/{job_id}/"
    output_gcs_path = f"gs://{bucket_name}/jobs/{job_id}/encoded/"

    # Determine instrumental selection for encoding config
    instrumental_selection = job.state_data.get('instrumental_selection', 'clean')
    existing_instrumental = getattr(job, 'existing_instrumental_gcs_path', None)

    encoding_config = {
        "formats": ["mp4_4k_lossless", "mp4_4k_lossy", "mp4_720p"],
        "base_name": base_name,
        "instrumental_selection": instrumental_selection,
        "existing_instrumental": existing_instrumental,
        "ffmpeg_threads": 8,  # c4-standard-8 has 8 vCPUs
    }

    job_log.info("Submitting encoding job to GCE worker")
    job_log.info(f"  Input: {input_gcs_path}")
    job_log.info(f"  Output: {output_gcs_path}")

    def progress_callback(progress: int):
        # Update job progress (encoding is 75-95% of total)
        scaled_progress = 75 + int(progress * 0.2)  # Map 0-100 to 75-95
        job_manager.transition_to_state(
            job_id=job_id,
            new_status=JobStatus.ENCODING,
            progress=scaled_progress,
            message=f"Encoding videos ({progress}%)"
        )

    try:
        # Submit and wait for encoding
        with job_span("gce-encoding", job_id) as encoding_span:
            encode_start = time.time()
            add_span_event("gce_encoding_started")

            result = await encoding_service.encode_videos(
                job_id=job_id,
                input_gcs_path=input_gcs_path,
                output_gcs_path=output_gcs_path,
                encoding_config=encoding_config,
                progress_callback=progress_callback,
            )

            encode_duration = time.time() - encode_start
            encoding_span.set_attribute("duration_seconds", encode_duration)
            add_span_event("gce_encoding_completed", {"duration_seconds": encode_duration})

            job_log.info(f"GCE encoding complete in {encode_duration:.1f}s")
            logger.info(f"[job:{job_id}] GCE encoding complete in {encode_duration:.1f}s")

            # Download encoded files from GCS to temp_dir
            output_files = result.get("output_files", [])
            local_files = {}

            for gcs_path in output_files:
                filename = os.path.basename(gcs_path)
                local_path = os.path.join(temp_dir, filename)

                # BUGFIX: previous message was an f-string with no placeholder
                # that logged a literal "(unknown)"; log the actual filename.
                job_log.info(f"Downloading encoded file: {filename}")
                storage.download_file(gcs_path, local_path)

                # Map to result keys expected by _upload_results
                # Files are named like "Artist - Title (Final Karaoke Lossless 4k).mp4"
                filename_lower = filename.lower()
                if "lossless 4k" in filename_lower:
                    if filename.endswith(".mkv"):
                        local_files["final_video_mkv"] = local_path
                    else:
                        local_files["final_video"] = local_path
                elif "lossy 4k" in filename_lower:
                    local_files["final_video_lossy"] = local_path
                elif "720p" in filename_lower:
                    local_files["final_video_720p"] = local_path

            job_log.info(f"Downloaded {len(local_files)} encoded files")

            return local_files

    except Exception as e:
        job_log.error(f"GCE encoding failed: {e}")
        logger.error(f"[job:{job_id}] GCE encoding failed: {e}")
        raise
|
185
|
+
|
|
186
|
+
async def generate_video_orchestrated(job_id: str) -> bool:
    """
    Generate final karaoke videos using the new VideoWorkerOrchestrator.

    This provides a unified pipeline that works with any encoding backend
    (GCE or local), ensuring all features like YouTube upload, Discord
    notifications, and CDG/TXT packaging work consistently.

    Side effects: changes the process working directory to a temp dir for
    the duration of the run (restored in ``finally``), creates and removes
    that temp dir, and mutates job state via JobManager transitions.

    Args:
        job_id: Job ID to process

    Returns:
        True if successful, False otherwise
    """
    # Imported lazily to avoid a circular import at module load time
    # (presumably — TODO confirm against video_worker_orchestrator).
    from backend.workers.video_worker_orchestrator import (
        VideoWorkerOrchestrator,
        create_orchestrator_config_from_job,
    )

    start_time = time.time()
    job_manager = JobManager()
    storage = StorageService()

    # Create job logger for remote debugging
    job_log = create_job_logger(job_id, "video")

    # Log with structured markers for easy Cloud Logging queries
    logger.info(f"[job:{job_id}] WORKER_START worker=video orchestrator=true")

    # Set up log capture for KaraokeFinalise (still used by some services)
    # NOTE(review): the handler is only removed in the ``finally`` of the
    # try below — the two early ``return False`` paths leave it attached.
    log_handler = setup_job_logging(job_id, "video", *VIDEO_WORKER_LOGGERS)

    job = job_manager.get_job(job_id)
    if not job:
        logger.error(f"[job:{job_id}] Job not found")
        return False

    # Validate prerequisites
    if not _validate_prerequisites(job):
        logger.error(f"[job:{job_id}] Prerequisites not met for video generation")
        return False

    # Create temporary working directory
    temp_dir = tempfile.mkdtemp(prefix=f"karaoke_video_{job_id}_")
    original_cwd = os.getcwd()

    # Set up rclone config if needed (for legacy Dropbox upload path)
    rclone_service = None
    if getattr(job, 'organised_dir_rclone_root', None):
        rclone_service = get_rclone_service()
        if rclone_service.setup_rclone_config():
            job_log.info("Rclone config loaded for Dropbox upload")

    # Load YouTube credentials if needed
    youtube_credentials = None
    if getattr(job, 'enable_youtube_upload', False):
        youtube_service = get_youtube_service()
        if youtube_service.is_configured:
            youtube_credentials = youtube_service.get_credentials_dict()
            job_log.info("YouTube credentials loaded for video upload")
        else:
            job_log.warning("YouTube credentials not available - upload will be skipped")

    try:
        # Wrap entire worker in a tracing span
        with job_span("video-worker-orchestrated", job_id, {"artist": job.artist, "title": job.title}) as root_span:
            with job_logging_context(job_id):
                job_log.info(f"Starting orchestrated video generation for {job.artist} - {job.title}")
                logger.info(f"[job:{job_id}] Starting orchestrated video generation")

                # Transition to GENERATING_VIDEO state
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.GENERATING_VIDEO,
                    progress=70,
                    message="Preparing files for video generation"
                )

                # Download and set up files
                # Sanitize artist/title to handle Unicode characters (curly quotes, em dashes, etc.)
                safe_artist = sanitize_filename(job.artist) if job.artist else "Unknown"
                safe_title = sanitize_filename(job.title) if job.title else "Unknown"
                base_name = f"{safe_artist} - {safe_title}"
                with job_span("download-files", job_id):
                    job_log.info("Downloading files from GCS...")
                    await _setup_working_directory(job_id, job, storage, temp_dir, base_name, job_log)
                    job_log.info("All files downloaded successfully")

                # Load style config for CDG styles
                style_config = await load_style_config(job, storage, temp_dir)
                cdg_styles = style_config.get_cdg_styles()

                # Change to working directory (restored in finally)
                os.chdir(temp_dir)

                # Create orchestrator config from job
                config = create_orchestrator_config_from_job(
                    job=job,
                    temp_dir=temp_dir,
                    youtube_credentials=youtube_credentials,
                    cdg_styles=cdg_styles,
                )

                # Run the orchestrated pipeline
                orchestrator = VideoWorkerOrchestrator(
                    config=config,
                    job_manager=job_manager,
                    storage=storage,
                    job_logger=job_log,
                )

                with job_span("orchestrator-run", job_id):
                    result = await orchestrator.run()

                if not result.success:
                    raise Exception(result.error_message or "Orchestrator failed")

                # Prepare distribution directory for native uploads
                with job_span("distribution", job_id):
                    await _handle_native_distribution(
                        job_id=job_id,
                        job=job,
                        job_log=job_log,
                        job_manager=job_manager,
                        temp_dir=temp_dir,
                        result={
                            'brand_code': result.brand_code,
                            'youtube_url': result.youtube_url,
                            'final_video': result.final_video,
                            'final_video_lossy': result.final_video_lossy,
                            'final_video_720p': result.final_video_720p,
                            'final_karaoke_cdg_zip': result.final_karaoke_cdg_zip,
                            'dropbox_link': result.dropbox_link,
                            'gdrive_files': result.gdrive_files,
                        },
                        storage=storage,
                    )

                # Upload generated files to GCS
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.PACKAGING,
                    progress=95,
                    message="Uploading final files"
                )

                with job_span("upload-results", job_id):
                    await _upload_results(job_id, job_manager, storage, temp_dir, {
                        'final_video': result.final_video,
                        'final_video_mkv': result.final_video_mkv,
                        'final_video_lossy': result.final_video_lossy,
                        'final_video_720p': result.final_video_720p,
                        'final_karaoke_cdg_zip': result.final_karaoke_cdg_zip,
                        'final_karaoke_txt_zip': result.final_karaoke_txt_zip,
                    })

                # Store result metadata in job BEFORE transitioning to COMPLETE
                # This ensures youtube_url is available when completion email is sent
                logger.info(f"[job:{job_id}] Video generation complete")
                job_manager.update_job(job_id, {
                    'state_data': {
                        **job.state_data,
                        'brand_code': result.brand_code,
                        'youtube_url': result.youtube_url,
                        'dropbox_link': result.dropbox_link,
                        'gdrive_files': result.gdrive_files,
                    }
                })

                # Mark job as complete (triggers completion email with youtube_url now available)
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.COMPLETE,
                    progress=100,
                    message="Karaoke generation complete!"
                )

                duration = time.time() - start_time
                root_span.set_attribute("duration_seconds", duration)
                root_span.set_attribute("brand_code", result.brand_code or '')
                logger.info(f"[job:{job_id}] WORKER_END worker=video orchestrator=true status=success duration={duration:.1f}s")
                return True

    except Exception as e:
        duration = time.time() - start_time
        logger.error(f"[job:{job_id}] WORKER_END worker=video orchestrator=true status=error duration={duration:.1f}s error={e}")
        job_manager.mark_job_failed(
            job_id=job_id,
            error_message=f"Video generation failed: {str(e)}",
            error_details={"stage": "video_generation", "error": str(e)}
        )
        return False

    finally:
        # Restore original working directory
        os.chdir(original_cwd)

        # Remove log handler
        for logger_name in VIDEO_WORKER_LOGGERS:
            try:
                logging.getLogger(logger_name).removeHandler(log_handler)
            except Exception:
                pass

        # Cleanup rclone config file
        if rclone_service:
            rclone_service.cleanup()

        # Cleanup temporary directory
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
            logger.debug(f"Cleaned up temp directory: {temp_dir}")
+
|
|
399
|
+
|
|
400
|
+
async def generate_video(job_id: str) -> bool:
    """
    Generate final karaoke videos.

    Dispatches to one of two implementations depending on the module-level
    USE_NEW_ORCHESTRATOR flag: the unified orchestrator pipeline when
    enabled, otherwise the legacy KaraokeFinalise pipeline.

    Args:
        job_id: Job ID to process

    Returns:
        True if successful, False otherwise
    """
    if USE_NEW_ORCHESTRATOR:
        logger.info(f"[job:{job_id}] Using new orchestrator-based pipeline")
        return await generate_video_orchestrated(job_id)

    logger.info(f"[job:{job_id}] Using legacy KaraokeFinalise pipeline")
    return await generate_video_legacy(job_id)
|
+
|
|
420
|
+
|
|
421
|
+
async def generate_video_legacy(job_id: str) -> bool:
    """
    Generate final karaoke videos using KaraokeFinalise (legacy path).

    This is the original implementation that has divergent behavior between
    GCE and local encoding paths. Kept for rollback purposes.

    High-level flow:
      1. Validate prerequisites and download all inputs from GCS.
      2. Encode via GCE worker (if enabled) or KaraokeFinalise locally.
      3. Run native-API distribution (Dropbox/Google Drive).
      4. Upload results back to GCS and mark the job COMPLETE.

    Args:
        job_id: Job ID to process

    Returns:
        True if successful, False otherwise
    """
    start_time = time.time()
    job_manager = JobManager()
    storage = StorageService()
    # NOTE(review): `settings` is assigned but not referenced in this body —
    # presumably retained for its side effects or legacy reasons; confirm.
    settings = get_settings()

    # Create job logger for remote debugging
    job_log = create_job_logger(job_id, "video")

    # Log with structured markers for easy Cloud Logging queries
    logger.info(f"[job:{job_id}] WORKER_START worker=video")

    # Set up log capture for KaraokeFinalise
    log_handler = setup_job_logging(job_id, "video", *VIDEO_WORKER_LOGGERS)

    job = job_manager.get_job(job_id)
    if not job:
        logger.error(f"[job:{job_id}] Job not found")
        return False

    # Validate prerequisites
    if not _validate_prerequisites(job):
        logger.error(f"[job:{job_id}] Prerequisites not met for video generation")
        return False

    # Create temporary working directory (removed in the finally block)
    temp_dir = tempfile.mkdtemp(prefix=f"karaoke_video_{job_id}_")
    original_cwd = os.getcwd()

    # Set up rclone config if needed for Dropbox upload
    rclone_service = None
    if getattr(job, 'organised_dir_rclone_root', None):
        rclone_service = get_rclone_service()
        if rclone_service.setup_rclone_config():
            job_log.info("Rclone config loaded for Dropbox upload")
        else:
            job_log.warning("Rclone config not available - Dropbox upload will be skipped")

    # Load YouTube credentials if needed
    youtube_credentials = None
    if getattr(job, 'enable_youtube_upload', False):
        youtube_service = get_youtube_service()
        if youtube_service.is_configured:
            youtube_credentials = youtube_service.get_credentials_dict()
            job_log.info("YouTube credentials loaded for video upload")
        else:
            job_log.warning("YouTube credentials not available - upload will be skipped")

    try:
        # Wrap entire worker in a tracing span
        with job_span("video-worker", job_id, {"artist": job.artist, "title": job.title}) as root_span:
            # Use job_logging_context for proper log isolation when multiple jobs run concurrently
            # This ensures logs from third-party libraries (karaoke_gen.karaoke_finalise) are only
            # captured by this job's handler, not handlers from other concurrent jobs
            with job_logging_context(job_id):
                job_log.info(f"Starting video finalization for {job.artist} - {job.title}")
                logger.info(f"[job:{job_id}] Starting video generation for {job.artist} - {job.title}")

                # Transition to GENERATING_VIDEO state
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.GENERATING_VIDEO,
                    progress=70,
                    message="Preparing files for video generation"
                )

                # Download and set up files in the format KaraokeFinalise expects
                # Sanitize artist/title to handle Unicode characters (curly quotes, em dashes, etc.)
                safe_artist = sanitize_filename(job.artist) if job.artist else "Unknown"
                safe_title = sanitize_filename(job.title) if job.title else "Unknown"
                base_name = f"{safe_artist} - {safe_title}"
                with job_span("download-files", job_id):
                    job_log.info("Downloading files from GCS (this may take a few minutes for large files)...")
                    await _setup_working_directory(job_id, job, storage, temp_dir, base_name, job_log)
                    job_log.info("All files downloaded successfully")

                # Load style config for CDG styles
                job_log.info("Loading CDG style configuration...")
                style_config = await load_style_config(job, storage, temp_dir)
                cdg_styles = style_config.get_cdg_styles()
                if cdg_styles:
                    job_log.info("CDG styles loaded from custom configuration")
                else:
                    job_log.info("Using default CDG styles")

                # Change to working directory (KaraokeFinalise works in cwd)
                os.chdir(temp_dir)

                # Get the selected instrumental file path
                # Batch 3: If user provided existing instrumental, use that; otherwise use AI-separated
                instrumental_selection = job.state_data['instrumental_selection']
                existing_instrumental_path = getattr(job, 'existing_instrumental_gcs_path', None)

                if existing_instrumental_path:
                    # User provided existing instrumental (naming must match
                    # the file written by _setup_working_directory)
                    ext = Path(existing_instrumental_path).suffix.lower()
                    instrumental_file = os.path.join(temp_dir, f"{base_name} (Instrumental User){ext}")
                    instrumental_source = "user-provided"
                else:
                    # Use AI-separated instrumental
                    instrumental_suffix = "Clean" if instrumental_selection == 'clean' else "Backing"
                    instrumental_file = os.path.join(temp_dir, f"{base_name} (Instrumental {instrumental_suffix}).flac")
                    instrumental_source = instrumental_selection

                # Transition to encoding state
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.ENCODING,
                    progress=75,
                    message="Encoding videos (15-20 min)"
                )

                # Get countdown padding info from lyrics metadata
                # This ensures instrumental is padded to match vocals if countdown was added
                lyrics_metadata = job.state_data.get('lyrics_metadata', {})
                countdown_padding_seconds = None
                if lyrics_metadata.get('has_countdown_padding'):
                    countdown_padding_seconds = lyrics_metadata.get('countdown_padding_seconds', 3.0)
                    job_log.info(f"Countdown padding detected: {countdown_padding_seconds}s - instrumental will be padded if needed")

                # Check if GCE encoding is enabled for high-performance encoding
                encoding_service = get_encoding_service()
                use_gce_encoding = encoding_service.is_enabled

                if use_gce_encoding:
                    # ============ GCE ENCODING PATH ============
                    # Use dedicated C4 GCE instance for 2-3x faster FFmpeg encoding
                    job_log.info("GCE encoding enabled - using high-performance encoding worker")
                    job_log.info(f"  instrumental source: {instrumental_source}")

                    result = await _encode_via_gce(
                        job_id=job_id,
                        job=job,
                        job_manager=job_manager,
                        storage=storage,
                        temp_dir=temp_dir,
                        base_name=base_name,
                        job_log=job_log,
                    )

                    # GCE encoding doesn't generate brand_code, CDG/TXT, or Discord notifications
                    # These will be handled by distribution if needed
                    job_log.info("GCE encoding complete - encoded files downloaded")

                else:
                    # ============ STANDARD ENCODING PATH ============
                    # Use KaraokeFinalise for encoding (runs on Cloud Run)

                    # Log finalization parameters
                    job_log.info("Using KaraokeFinalise for encoding (standard path)")
                    job_log.info(f"  enable_cdg: {getattr(job, 'enable_cdg', False)}")
                    job_log.info(f"  enable_txt: {getattr(job, 'enable_txt', False)}")
                    job_log.info(f"  brand_prefix: {getattr(job, 'brand_prefix', None)}")
                    job_log.info(f"  discord_webhook: {'configured' if getattr(job, 'discord_webhook_url', None) else 'not configured'}")
                    job_log.info(f"  instrumental source: {instrumental_source}")
                    job_log.info(f"  countdown_padding_seconds: {countdown_padding_seconds}")
                    if existing_instrumental_path:
                        job_log.info(f"  using user-provided instrumental (selection was: {instrumental_selection})")

                    # Set up YouTube description file if template is provided
                    youtube_desc_path = None
                    if youtube_credentials and getattr(job, 'youtube_description_template', None):
                        youtube_desc_path = os.path.join(temp_dir, "youtube_description.txt")
                        with open(youtube_desc_path, 'w') as f:
                            f.write(job.youtube_description_template)
                        job_log.info("YouTube description template written to temp file")

                    finalise = KaraokeFinalise(
                        logger=logger,
                        log_level=logging.INFO,
                        dry_run=False,
                        instrumental_format="flac",
                        # CDG/TXT generation
                        enable_cdg=getattr(job, 'enable_cdg', False),
                        enable_txt=getattr(job, 'enable_txt', False),
                        cdg_styles=cdg_styles,
                        # Brand code and organization (server-side mode uses rclone)
                        brand_prefix=getattr(job, 'brand_prefix', None),
                        organised_dir=None,  # Not used in cloud - files stay in GCS
                        organised_dir_rclone_root=getattr(job, 'organised_dir_rclone_root', None),
                        public_share_dir=None,  # Not used in cloud
                        # Notifications
                        discord_webhook_url=getattr(job, 'discord_webhook_url', None),
                        # YouTube upload (server-side with pre-loaded credentials)
                        youtube_client_secrets_file=None,  # Not used with pre-stored credentials
                        youtube_description_file=youtube_desc_path,
                        user_youtube_credentials=youtube_credentials,  # Pre-loaded from Secret Manager
                        rclone_destination=None,
                        email_template_file=None,
                        # Server-side optimizations
                        non_interactive=True,
                        server_side_mode=True,
                        selected_instrumental_file=instrumental_file,
                        # Audio synchronization - ensure instrumental matches vocal padding
                        countdown_padding_seconds=countdown_padding_seconds,
                    )

                    # Call process() - this does ALL the work:
                    # - Encodes to 4 video formats
                    # - Generates CDG/TXT packages if enabled
                    # - Posts Discord notification if configured
                    # - Handles brand code generation
                    with job_span("karaoke-finalise", job_id) as finalise_span:
                        finalise_start = time.time()
                        job_log.info("Starting KaraokeFinalise.process() - this may take 15-20 minutes...")
                        logger.info(f"[job:{job_id}] Starting KaraokeFinalise.process()")
                        add_span_event("finalise_started")

                        result = finalise.process()

                        finalise_duration = time.time() - finalise_start
                        finalise_span.set_attribute("duration_seconds", finalise_duration)
                        add_span_event("finalise_completed", {"duration_seconds": finalise_duration})

                        job_log.info("KaraokeFinalise.process() complete!")
                        if result.get('brand_code'):
                            job_log.info(f"Brand code: {result.get('brand_code')}")
                        logger.info(f"[job:{job_id}] KaraokeFinalise.process() complete in {finalise_duration:.1f}s")
                        logger.info(f"[job:{job_id}] Brand code: {result.get('brand_code')}")

                # Native API distribution uploads (used by remote CLI instead of rclone)
                with job_span("distribution", job_id):
                    await _handle_native_distribution(
                        job_id=job_id,
                        job=job,
                        job_log=job_log,
                        job_manager=job_manager,
                        temp_dir=temp_dir,
                        result=result,
                        storage=storage,
                    )

                # Upload generated files to GCS
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.PACKAGING,
                    progress=95,
                    message="Uploading final files"
                )

                with job_span("upload-results", job_id):
                    await _upload_results(job_id, job_manager, storage, temp_dir, result)

                # Store result metadata in job BEFORE transitioning to COMPLETE
                # This ensures youtube_url is available when completion email is sent
                # NOTE: Must include dropbox_link and gdrive_files from result, since
                # _handle_native_distribution already saved them but job.state_data
                # is stale (fetched before distribution ran)
                logger.info(f"[job:{job_id}] Video generation complete")
                job_manager.update_job(job_id, {
                    'state_data': {
                        **job.state_data,
                        'brand_code': result.get('brand_code'),
                        'youtube_url': result.get('youtube_url'),
                        'dropbox_link': result.get('dropbox_link'),
                        'gdrive_files': result.get('gdrive_files'),
                    }
                })

                # Mark job as complete (triggers completion email with youtube_url now available)
                job_manager.transition_to_state(
                    job_id=job_id,
                    new_status=JobStatus.COMPLETE,
                    progress=100,
                    message="Karaoke generation complete!"
                )

                duration = time.time() - start_time
                root_span.set_attribute("duration_seconds", duration)
                root_span.set_attribute("brand_code", result.get('brand_code', ''))
                logger.info(f"[job:{job_id}] WORKER_END worker=video status=success duration={duration:.1f}s")
                return True

    except Exception as e:
        duration = time.time() - start_time
        logger.error(f"[job:{job_id}] WORKER_END worker=video status=error duration={duration:.1f}s error={e}")
        job_manager.mark_job_failed(
            job_id=job_id,
            error_message=f"Video generation failed: {str(e)}",
            error_details={"stage": "video_generation", "error": str(e)}
        )
        return False

    finally:
        # Restore original working directory
        os.chdir(original_cwd)

        # Remove log handler to avoid duplicate logging on future runs
        for logger_name in VIDEO_WORKER_LOGGERS:
            try:
                logging.getLogger(logger_name).removeHandler(log_handler)
            except Exception:
                pass

        # Cleanup rclone config file
        if rclone_service:
            rclone_service.cleanup()

        # Cleanup temporary directory
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
            logger.debug(f"Cleaned up temp directory: {temp_dir}")
|
|
735
|
+
|
|
736
|
+
|
|
737
|
+
async def _handle_native_distribution(
    job_id: str,
    job,
    job_log,
    job_manager: JobManager,
    temp_dir: str,
    result: Dict[str, Any],
    storage: StorageService = None,
) -> None:
    """
    Handle distribution uploads using native APIs (Dropbox SDK, Google Drive API).

    This is used by the remote CLI instead of rclone-based uploads.
    The native APIs provide:
    - Better error handling and retry logic
    - No need for rclone binary in container
    - Credentials managed via Secret Manager

    Distribution is best-effort: every upload failure is logged and swallowed
    so the job itself never fails because of distribution. `result` is mutated
    in place (brand_code, dropbox_link, gdrive_files).

    Args:
        job_id: Job ID
        job: Job object with dropbox_path and gdrive_folder_id fields
        job_log: Job-specific logger
        job_manager: Job manager for updating job state
        temp_dir: Temporary directory with output files
        result: Result dict from KaraokeFinalise.process()
        storage: StorageService instance for downloading stems/lyrics
    """
    brand_code = result.get('brand_code')
    # Sanitize artist/title to handle Unicode characters (curly quotes, em dashes, etc.)
    safe_artist = sanitize_filename(job.artist) if job.artist else "Unknown"
    safe_title = sanitize_filename(job.title) if job.title else "Unknown"
    base_name = f"{safe_artist} - {safe_title}"

    # Check if we should preserve existing brand code (Batch 6: --keep-brand-code)
    keep_brand_code = getattr(job, 'keep_brand_code', None)
    if keep_brand_code:
        brand_code = keep_brand_code
        result['brand_code'] = brand_code
        job_log.info(f"Using preserved brand code: {brand_code}")

    # Upload to Dropbox using native SDK
    dropbox_path = getattr(job, 'dropbox_path', None)
    brand_prefix = getattr(job, 'brand_prefix', None)

    # Dropbox upload requires both a destination path and a brand prefix.
    if dropbox_path and brand_prefix:
        try:
            # Local import so the SDK is only required when Dropbox is used.
            from backend.services.dropbox_service import get_dropbox_service

            dropbox = get_dropbox_service()

            if not dropbox.is_configured:
                job_log.warning("Dropbox credentials not configured - skipping upload")
            else:
                job_log.info(f"Starting native Dropbox upload to {dropbox_path}")

                # Calculate brand code from existing folders (if not already generated)
                if not brand_code:
                    brand_code = dropbox.get_next_brand_code(dropbox_path, brand_prefix)
                    result['brand_code'] = brand_code
                    job_log.info(f"Generated brand code: {brand_code}")

                # Prepare full distribution directory with stems/ and lyrics/ subfolders
                # This must be done before upload to ensure complete output structure
                if storage:
                    await _prepare_distribution_directory(
                        job_id=job_id,
                        job=job,
                        storage=storage,
                        temp_dir=temp_dir,
                        base_name=base_name,
                        job_log=job_log,
                    )

                # Create folder name and upload files
                folder_name = f"{brand_code} - {base_name}"
                remote_folder = f"{dropbox_path}/{folder_name}"

                job_log.info(f"Uploading to Dropbox folder: {remote_folder}")
                dropbox.upload_folder(temp_dir, remote_folder)

                # Create sharing link (non-fatal if it fails)
                try:
                    sharing_link = dropbox.create_shared_link(remote_folder)
                    result['dropbox_link'] = sharing_link
                    job_log.info(f"Dropbox sharing link: {sharing_link}")
                except Exception as e:
                    job_log.warning(f"Failed to create Dropbox sharing link: {e}")

                job_log.info("Native Dropbox upload complete")

        except ImportError as e:
            job_log.warning(f"Dropbox SDK not installed: {e}")
        except Exception as e:
            job_log.error(f"Native Dropbox upload failed: {e}", exc_info=True)
            # Don't fail the job - distribution is optional

    # Upload to Google Drive using native API
    gdrive_folder_id = getattr(job, 'gdrive_folder_id', None)

    if gdrive_folder_id:
        try:
            # Local import so the Google API packages are only required when used.
            from backend.services.gdrive_service import get_gdrive_service

            gdrive = get_gdrive_service()

            if not gdrive.is_configured:
                job_log.warning("Google Drive credentials not configured - skipping upload")
            else:
                job_log.info(f"Starting native Google Drive upload to folder {gdrive_folder_id}")

                # Use brand code from Dropbox or generate placeholder
                upload_brand_code = brand_code or f"{brand_prefix or 'TRACK'}-0000"

                # Map result keys to expected keys for upload_to_public_share
                output_files = {
                    'final_karaoke_lossy_mp4': result.get('final_video_lossy'),
                    'final_karaoke_lossy_720p_mp4': result.get('final_video_720p'),
                    'final_karaoke_cdg_zip': result.get('final_karaoke_cdg_zip'),
                }

                uploaded = gdrive.upload_to_public_share(
                    root_folder_id=gdrive_folder_id,
                    brand_code=upload_brand_code,
                    base_name=base_name,
                    output_files=output_files,
                )

                result['gdrive_files'] = uploaded
                job_log.info(f"Google Drive upload complete: {len(uploaded)} files uploaded")

        except ImportError as e:
            job_log.warning(f"Google API packages not installed: {e}")
        except Exception as e:
            job_log.error(f"Native Google Drive upload failed: {e}", exc_info=True)
            # Don't fail the job - distribution is optional

    # Update job state_data with brand code and links
    if brand_code or result.get('dropbox_link') or result.get('gdrive_files'):
        try:
            job_manager.update_job(job_id, {
                'state_data': {
                    **job.state_data,
                    'brand_code': brand_code,
                    'dropbox_link': result.get('dropbox_link'),
                    'gdrive_files': result.get('gdrive_files'),
                }
            })
        except Exception as e:
            job_log.warning(f"Failed to update job state_data: {e}")
|
|
886
|
+
|
|
887
|
+
|
|
888
|
+
def _validate_prerequisites(job) -> bool:
|
|
889
|
+
"""Validate that all prerequisites are met for video generation."""
|
|
890
|
+
# Check instrumental selection
|
|
891
|
+
instrumental_selection = job.state_data.get('instrumental_selection')
|
|
892
|
+
if not instrumental_selection:
|
|
893
|
+
logger.error(f"Job {job.job_id}: No instrumental selected")
|
|
894
|
+
return False
|
|
895
|
+
|
|
896
|
+
# 'custom' is valid when user provided existing_instrumental
|
|
897
|
+
if instrumental_selection not in ['clean', 'with_backing', 'custom']:
|
|
898
|
+
logger.error(f"Job {job.job_id}: Invalid instrumental selection: {instrumental_selection}")
|
|
899
|
+
return False
|
|
900
|
+
|
|
901
|
+
# Check screens exist
|
|
902
|
+
screens = job.file_urls.get('screens', {})
|
|
903
|
+
if not screens.get('title') or not screens.get('end'):
|
|
904
|
+
logger.error(f"Job {job.job_id}: Missing title or end screen")
|
|
905
|
+
return False
|
|
906
|
+
|
|
907
|
+
# Check lyrics video exists
|
|
908
|
+
videos = job.file_urls.get('videos', {})
|
|
909
|
+
if not videos.get('with_vocals'):
|
|
910
|
+
logger.error(f"Job {job.job_id}: Missing lyrics video")
|
|
911
|
+
return False
|
|
912
|
+
|
|
913
|
+
# Check instrumental exists - for 'custom', check existing_instrumental_gcs_path instead of stems
|
|
914
|
+
if instrumental_selection == 'custom':
|
|
915
|
+
existing_instrumental_path = getattr(job, 'existing_instrumental_gcs_path', None)
|
|
916
|
+
if not existing_instrumental_path:
|
|
917
|
+
logger.error(f"Job {job.job_id}: Custom instrumental selected but no existing_instrumental_gcs_path")
|
|
918
|
+
return False
|
|
919
|
+
return True # Other stem checks not needed for custom instrumental
|
|
920
|
+
|
|
921
|
+
stems = job.file_urls.get('stems', {})
|
|
922
|
+
instrumental_key = 'instrumental_clean' if instrumental_selection == 'clean' else 'instrumental_with_backing'
|
|
923
|
+
if not stems.get(instrumental_key):
|
|
924
|
+
logger.error(f"Job {job.job_id}: Missing instrumental: {instrumental_key}")
|
|
925
|
+
return False
|
|
926
|
+
|
|
927
|
+
return True
|
|
928
|
+
|
|
929
|
+
|
|
930
|
+
async def _setup_working_directory(
    job_id: str,
    job,
    storage: StorageService,
    temp_dir: str,
    base_name: str,
    job_log=None
) -> None:
    """
    Fetch input files from GCS and lay them out the way KaraokeFinalise expects.

    KaraokeFinalise.process() discovers its inputs by naming convention inside
    the working directory:
    - {base_name} (Title).mov
    - {base_name} (End).mov
    - {base_name} (With Vocals).mov
    - {base_name} (Karaoke).lrc
    - Instrumental audio file
    """
    def _note(message: str):
        """Mirror a progress message to the module logger and the job logger."""
        logger.info(f"Job {job_id}: {message}")
        if job_log:
            job_log.info(message)

    _note("Setting up working directory")

    file_urls = job.file_urls

    # Title screen
    _note("Downloading title screen...")
    storage.download_file(
        file_urls['screens']['title'],
        os.path.join(temp_dir, f"{base_name} (Title).mov"),
    )
    _note("Downloaded title screen")

    # End screen
    _note("Downloading end screen...")
    storage.download_file(
        file_urls['screens']['end'],
        os.path.join(temp_dir, f"{base_name} (End).mov"),
    )
    _note("Downloaded end screen")

    # Lyrics video with vocals -- by far the largest transfer (~1-2GB)
    _note("Downloading karaoke video (largest file, may take 1-2 minutes)...")
    storage.download_file(
        file_urls['videos']['with_vocals'],
        os.path.join(temp_dir, f"{base_name} (With Vocals).mov"),
    )
    _note("Downloaded karaoke video")

    # Instrumental: prefer a user-supplied file over the AI-separated stems
    user_instrumental = getattr(job, 'existing_instrumental_gcs_path', None)
    selection = job.state_data['instrumental_selection']

    if user_instrumental:
        # Batch 3: caller uploaded their own instrumental; keep its extension
        suffix = Path(user_instrumental).suffix.lower()
        _note("Downloading user-provided existing instrumental...")
        storage.download_file(
            user_instrumental,
            os.path.join(temp_dir, f"{base_name} (Instrumental User){suffix}"),
        )
        _note("Downloaded user-provided instrumental")
    else:
        # AI-separated instrumental chosen during review
        stem_key = 'instrumental_clean' if selection == 'clean' else 'instrumental_with_backing'
        label = "Clean" if selection == 'clean' else "Backing"
        _note(f"Downloading {selection} instrumental audio...")
        storage.download_file(
            file_urls['stems'][stem_key],
            os.path.join(temp_dir, f"{base_name} (Instrumental {label}).flac"),
        )
        _note(f"Downloaded instrumental ({selection})")

    # LRC file, when present (needed for CDG/TXT generation)
    lyrics_urls = file_urls.get('lyrics', {})
    if 'lrc' in lyrics_urls:
        storage.download_file(
            lyrics_urls['lrc'],
            os.path.join(temp_dir, f"{base_name} (Karaoke).lrc"),
        )
        _note("Downloaded LRC file")

    # Title/end JPGs (used for the YouTube thumbnail)
    screens = file_urls.get('screens', {})
    if screens.get('title_jpg'):
        storage.download_file(
            screens['title_jpg'],
            os.path.join(temp_dir, f"{base_name} (Title).jpg"),
        )
        _note("Downloaded title JPG for thumbnail")

    if screens.get('end_jpg'):
        storage.download_file(
            screens['end_jpg'],
            os.path.join(temp_dir, f"{base_name} (End).jpg"),
        )
        _note("Downloaded end JPG")
|
|
1017
|
+
|
|
1018
|
+
|
|
1019
|
+
async def _prepare_distribution_directory(
    job_id: str,
    job,
    storage: StorageService,
    temp_dir: str,
    base_name: str,
    job_log=None
) -> None:
    """
    Prepare the full distribution directory structure matching local CLI output.

    Creates:
    - stems/ subfolder with all audio stems (proper model names)
    - lyrics/ subfolder with intermediate lyrics files
    - Root-level instrumentals with proper model names

    This ensures the Dropbox upload contains the complete output structure.

    Args:
        job_id: Identifier used for log prefixes and the GCS previews prefix.
        job: Job record; reads `job.state_data` (model names) and
            `job.file_urls` (stems/lyrics/videos URL maps).
        storage: Storage backend providing `download_file` and `list_files`.
        temp_dir: Local working directory that becomes the distribution root.
        base_name: "Artist - Title" style prefix used in all output filenames.
        job_log: Optional per-job logger; mirrored alongside the module logger.

    Note: stem downloads are not wrapped in try/except — a failed stem
    download propagates and fails the job, whereas lyrics/preview files are
    treated as best-effort.
    """
    def log_progress(message: str):
        """Log to both module logger and job logger if available."""
        logger.info(f"Job {job_id}: {message}")
        if job_log:
            job_log.info(message)

    log_progress("Preparing full distribution directory structure")

    # Get model names from state_data (stored by audio_worker). The fallback
    # constants match the audio worker's default separation models so the
    # filenames stay correct even for jobs predating model_names tracking.
    model_names = job.state_data.get('model_names', {})
    clean_model = model_names.get('clean_instrumental_model', 'model_bs_roformer_ep_317_sdr_12.9755.ckpt')
    backing_models = model_names.get('backing_vocals_models', ['mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt'])
    other_models = model_names.get('other_stems_models', ['htdemucs_6s.yaml'])
    backing_model = backing_models[0] if backing_models else 'mel_band_roformer_karaoke_aufr33_viperx_sdr_10.1956.ckpt'
    other_model = other_models[0] if other_models else 'htdemucs_6s.yaml'

    # Create stems subdirectory
    stems_dir = os.path.join(temp_dir, "stems")
    os.makedirs(stems_dir, exist_ok=True)

    # Create lyrics subdirectory
    lyrics_dir = os.path.join(temp_dir, "lyrics")
    os.makedirs(lyrics_dir, exist_ok=True)

    stems = job.file_urls.get('stems', {})
    lyrics = job.file_urls.get('lyrics', {})

    # --- Download stems to stems/ subfolder with proper model names ---

    # Clean up simplified-named instrumentals from _setup_working_directory
    # These were needed for KaraokeFinalise but shouldn't go to distribution
    simplified_instrumental_patterns = [
        f"{base_name} (Instrumental Clean).flac",
        f"{base_name} (Instrumental Backing).flac",
    ]
    for pattern in simplified_instrumental_patterns:
        simplified_path = os.path.join(temp_dir, pattern)
        if os.path.exists(simplified_path):
            os.remove(simplified_path)
            log_progress(f"Removed simplified instrumental: {pattern}")

    # Clean instrumental (from clean_instrumental_model)
    if stems.get('instrumental_clean'):
        dest_path = os.path.join(stems_dir, f"{base_name} (Instrumental {clean_model}).flac")
        storage.download_file(stems['instrumental_clean'], dest_path)
        log_progress("Downloaded clean instrumental to stems/")

        # Also copy to root level for distribution
        root_path = os.path.join(temp_dir, f"{base_name} (Instrumental {clean_model}).flac")
        shutil.copy2(dest_path, root_path)

    # Clean vocals (from clean_instrumental_model)
    if stems.get('vocals_clean'):
        dest_path = os.path.join(stems_dir, f"{base_name} (Vocals {clean_model}).flac")
        storage.download_file(stems['vocals_clean'], dest_path)
        log_progress("Downloaded clean vocals to stems/")

    # Instrumental with backing vocals (from backing_vocals_model)
    if stems.get('instrumental_with_backing'):
        dest_path = os.path.join(stems_dir, f"{base_name} (Instrumental +BV {backing_model}).flac")
        storage.download_file(stems['instrumental_with_backing'], dest_path)
        log_progress("Downloaded instrumental+BV to stems/")

        # Also copy to root level for distribution
        root_path = os.path.join(temp_dir, f"{base_name} (Instrumental +BV {backing_model}).flac")
        shutil.copy2(dest_path, root_path)

    # Lead vocals (from backing_vocals_model)
    if stems.get('lead_vocals'):
        dest_path = os.path.join(stems_dir, f"{base_name} (Lead Vocals {backing_model}).flac")
        storage.download_file(stems['lead_vocals'], dest_path)
        log_progress("Downloaded lead vocals to stems/")

    # Backing vocals (from backing_vocals_model)
    if stems.get('backing_vocals'):
        dest_path = os.path.join(stems_dir, f"{base_name} (Backing Vocals {backing_model}).flac")
        storage.download_file(stems['backing_vocals'], dest_path)
        log_progress("Downloaded backing vocals to stems/")

    # Other stems (from other_stems_model - typically htdemucs_6s)
    other_stem_keys = ['bass', 'drums', 'guitar', 'piano', 'other']
    for stem_key in other_stem_keys:
        if stems.get(stem_key):
            stem_name = stem_key.capitalize()
            dest_path = os.path.join(stems_dir, f"{base_name} ({stem_name} {other_model}).flac")
            storage.download_file(stems[stem_key], dest_path)
            log_progress(f"Downloaded {stem_key} to stems/")

    # --- Download lyrics files to lyrics/ subfolder ---
    # Use proper filenames matching local CLI output structure

    # Map job.files.lyrics keys to proper local filenames
    # Format: (job_files_key, local_filename_pattern)
    lyrics_file_mappings = [
        # Karaoke output files
        ('lrc', f"{base_name} (Karaoke).lrc"),
        ('ass', f"{base_name} (Karaoke).ass"),
        # Corrections and transcription files
        ('corrections', f"{base_name} (Lyrics Corrections).json"),
        ('corrected_txt', f"{base_name} (Lyrics Corrected).txt"),
        ('uncorrected', f"{base_name} (Lyrics Uncorrected).txt"),
        # Reference lyrics from various sources
        ('reference_genius', f"{base_name} (Lyrics Genius).txt"),
        ('reference_spotify', f"{base_name} (Lyrics Spotify).txt"),
        ('reference_musixmatch', f"{base_name} (Lyrics Musixmatch).txt"),
        ('reference_lrclib', f"{base_name} (Lyrics Lrclib).txt"),
    ]

    # Lyrics files are best-effort: a missing/failed download is logged but
    # does not abort distribution preparation.
    for lyrics_key, local_filename in lyrics_file_mappings:
        if lyrics.get(lyrics_key):
            dest_path = os.path.join(lyrics_dir, local_filename)
            try:
                storage.download_file(lyrics[lyrics_key], dest_path)
                log_progress(f"Downloaded {lyrics_key} to lyrics/{local_filename}")
            except Exception as e:
                log_progress(f"Could not download {lyrics_key}: {e}")

    # --- Download (With Vocals).mkv to lyrics/ subfolder ---
    # Local CLI places the karaoke video with vocals in the lyrics folder
    videos = job.file_urls.get('videos', {})
    if videos.get('with_vocals'):
        with_vocals_dest = os.path.join(lyrics_dir, f"{base_name} (With Vocals).mkv")
        try:
            storage.download_file(videos['with_vocals'], with_vocals_dest)
            log_progress(f"Downloaded with_vocals to lyrics/{base_name} (With Vocals).mkv")
        except Exception as e:
            log_progress(f"Could not download with_vocals to lyrics/: {e}")

    # --- Download preview ASS files to lyrics/previews/ subfolder ---
    # Preview files are stored in jobs/{job_id}/previews/ in GCS
    previews_dir = os.path.join(lyrics_dir, "previews")
    os.makedirs(previews_dir, exist_ok=True)

    # List all preview files from GCS and download them
    try:
        preview_prefix = f"jobs/{job_id}/previews/"
        preview_files = storage.list_files(preview_prefix)
        for blob_name in preview_files:
            if blob_name.endswith('.ass'):
                filename = os.path.basename(blob_name)
                dest_path = os.path.join(previews_dir, filename)
                storage.download_file(blob_name, dest_path)
                # Bug fix: previously logged the literal text "(unknown)"
                # instead of the downloaded file's name.
                log_progress(f"Downloaded preview {filename} to lyrics/previews/")
    except Exception as e:
        log_progress(f"Could not download preview files: {e}")

    log_progress("Distribution directory prepared with stems/ and lyrics/ subfolders")
|
|
1184
|
+
|
|
1185
|
+
|
|
1186
|
+
async def _upload_results(
    job_id: str,
    job_manager: JobManager,
    storage: StorageService,
    temp_dir: str,
    result: Dict[str, Any]
) -> None:
    """Upload generated files to GCS."""

    # (result key, GCS category, GCS file key) triples describing which
    # generated artifacts to publish and where they live under jobs/{job_id}/.
    upload_plan = (
        ('final_video', 'finals', 'lossless_4k_mp4'),
        ('final_video_mkv', 'finals', 'lossless_4k_mkv'),
        ('final_video_lossy', 'finals', 'lossy_4k_mp4'),
        ('final_video_720p', 'finals', 'lossy_720p_mp4'),
        ('final_karaoke_cdg_zip', 'packages', 'cdg_zip'),
        ('final_karaoke_txt_zip', 'packages', 'txt_zip'),
    )

    for source_key, category, file_key in upload_plan:
        src = result.get(source_key)
        # Skip artifacts that were not produced, or whose path no longer exists.
        if not src or not os.path.exists(src):
            continue
        destination = f"jobs/{job_id}/{category}/{file_key}{Path(src).suffix}"
        # Uploads are independent: one failure is logged but does not block
        # the remaining artifacts.
        try:
            uploaded_url = storage.upload_file(src, destination)
            job_manager.update_file_url(job_id, category, file_key, uploaded_url)
            logger.info(f"Job {job_id}: Uploaded {file_key}")
        except Exception as e:
            logger.error(f"Job {job_id}: Failed to upload {file_key}: {e}")
|
|
1217
|
+
|
|
1218
|
+
|
|
1219
|
+
# ==================== CLI Entry Point for Cloud Run Jobs ====================
|
|
1220
|
+
# This allows the video worker to be run as a standalone Cloud Run Job.
|
|
1221
|
+
# Usage: python -m backend.workers.video_worker --job-id <job_id>
|
|
1222
|
+
|
|
1223
|
+
def main():
    """
    CLI entry point for running video worker as a Cloud Run Job.

    This is used when video encoding needs more than 30 minutes
    (the Cloud Tasks timeout limit). Cloud Run Jobs support up to 24 hours.

    Usage:
        python -m backend.workers.video_worker --job-id abc123

    Environment Variables:
        GOOGLE_CLOUD_PROJECT: GCP project ID (required)
        GCS_BUCKET_NAME: Storage bucket name (required)
    """
    import argparse
    import asyncio
    import sys

    # Parse the single required argument identifying which job to encode.
    arg_parser = argparse.ArgumentParser(
        description="Video encoding worker for karaoke generation"
    )
    arg_parser.add_argument(
        "--job-id",
        required=True,
        help="Job ID to process"
    )
    job_id = arg_parser.parse_args().job_id

    # Configure logging for the standalone process.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    )
    logger.info(f"Starting video worker CLI for job {job_id}")

    # Run the async worker; translate its boolean outcome (or a crash)
    # into the process exit code Cloud Run Jobs use to detect failure.
    exit_code = 1
    try:
        if asyncio.run(generate_video(job_id)):
            logger.info(f"Video generation completed successfully for job {job_id}")
            exit_code = 0
        else:
            logger.error(f"Video generation failed for job {job_id}")
    except Exception as e:
        logger.error(f"Video worker crashed: {e}", exc_info=True)
    sys.exit(exit_code)
|
|
1274
|
+
|
|
1275
|
+
|
|
1276
|
+
# Script entry point: run the Cloud Run Job CLI when executed directly.
if __name__ == "__main__":
    main()
|