karaoke-gen 0.90.1__py3-none-any.whl → 0.96.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- backend/.coveragerc +20 -0
- backend/.gitignore +37 -0
- backend/Dockerfile +43 -0
- backend/Dockerfile.base +74 -0
- backend/README.md +242 -0
- backend/__init__.py +0 -0
- backend/api/__init__.py +0 -0
- backend/api/dependencies.py +457 -0
- backend/api/routes/__init__.py +0 -0
- backend/api/routes/admin.py +742 -0
- backend/api/routes/audio_search.py +903 -0
- backend/api/routes/auth.py +348 -0
- backend/api/routes/file_upload.py +2076 -0
- backend/api/routes/health.py +344 -0
- backend/api/routes/internal.py +435 -0
- backend/api/routes/jobs.py +1610 -0
- backend/api/routes/review.py +652 -0
- backend/api/routes/themes.py +162 -0
- backend/api/routes/users.py +1014 -0
- backend/config.py +172 -0
- backend/main.py +133 -0
- backend/middleware/__init__.py +5 -0
- backend/middleware/audit_logging.py +124 -0
- backend/models/__init__.py +0 -0
- backend/models/job.py +519 -0
- backend/models/requests.py +123 -0
- backend/models/theme.py +153 -0
- backend/models/user.py +254 -0
- backend/models/worker_log.py +164 -0
- backend/pyproject.toml +29 -0
- backend/quick-check.sh +93 -0
- backend/requirements.txt +29 -0
- backend/run_tests.sh +60 -0
- backend/services/__init__.py +0 -0
- backend/services/audio_analysis_service.py +243 -0
- backend/services/audio_editing_service.py +278 -0
- backend/services/audio_search_service.py +702 -0
- backend/services/auth_service.py +630 -0
- backend/services/credential_manager.py +792 -0
- backend/services/discord_service.py +172 -0
- backend/services/dropbox_service.py +301 -0
- backend/services/email_service.py +1093 -0
- backend/services/encoding_interface.py +454 -0
- backend/services/encoding_service.py +405 -0
- backend/services/firestore_service.py +512 -0
- backend/services/flacfetch_client.py +573 -0
- backend/services/gce_encoding/README.md +72 -0
- backend/services/gce_encoding/__init__.py +22 -0
- backend/services/gce_encoding/main.py +589 -0
- backend/services/gce_encoding/requirements.txt +16 -0
- backend/services/gdrive_service.py +356 -0
- backend/services/job_logging.py +258 -0
- backend/services/job_manager.py +842 -0
- backend/services/job_notification_service.py +271 -0
- backend/services/local_encoding_service.py +590 -0
- backend/services/local_preview_encoding_service.py +407 -0
- backend/services/lyrics_cache_service.py +216 -0
- backend/services/metrics.py +413 -0
- backend/services/packaging_service.py +287 -0
- backend/services/rclone_service.py +106 -0
- backend/services/storage_service.py +209 -0
- backend/services/stripe_service.py +275 -0
- backend/services/structured_logging.py +254 -0
- backend/services/template_service.py +330 -0
- backend/services/theme_service.py +469 -0
- backend/services/tracing.py +543 -0
- backend/services/user_service.py +721 -0
- backend/services/worker_service.py +558 -0
- backend/services/youtube_service.py +112 -0
- backend/services/youtube_upload_service.py +445 -0
- backend/tests/__init__.py +4 -0
- backend/tests/conftest.py +224 -0
- backend/tests/emulator/__init__.py +7 -0
- backend/tests/emulator/conftest.py +88 -0
- backend/tests/emulator/test_e2e_cli_backend.py +1053 -0
- backend/tests/emulator/test_emulator_integration.py +356 -0
- backend/tests/emulator/test_style_loading_direct.py +436 -0
- backend/tests/emulator/test_worker_logs_direct.py +229 -0
- backend/tests/emulator/test_worker_logs_subcollection.py +443 -0
- backend/tests/requirements-test.txt +10 -0
- backend/tests/requirements.txt +6 -0
- backend/tests/test_admin_email_endpoints.py +411 -0
- backend/tests/test_api_integration.py +460 -0
- backend/tests/test_api_routes.py +93 -0
- backend/tests/test_audio_analysis_service.py +294 -0
- backend/tests/test_audio_editing_service.py +386 -0
- backend/tests/test_audio_search.py +1398 -0
- backend/tests/test_audio_services.py +378 -0
- backend/tests/test_auth_firestore.py +231 -0
- backend/tests/test_config_extended.py +68 -0
- backend/tests/test_credential_manager.py +377 -0
- backend/tests/test_dependencies.py +54 -0
- backend/tests/test_discord_service.py +244 -0
- backend/tests/test_distribution_services.py +820 -0
- backend/tests/test_dropbox_service.py +472 -0
- backend/tests/test_email_service.py +492 -0
- backend/tests/test_emulator_integration.py +322 -0
- backend/tests/test_encoding_interface.py +412 -0
- backend/tests/test_file_upload.py +1739 -0
- backend/tests/test_flacfetch_client.py +632 -0
- backend/tests/test_gdrive_service.py +524 -0
- backend/tests/test_instrumental_api.py +431 -0
- backend/tests/test_internal_api.py +343 -0
- backend/tests/test_job_creation_regression.py +583 -0
- backend/tests/test_job_manager.py +339 -0
- backend/tests/test_job_manager_notifications.py +329 -0
- backend/tests/test_job_notification_service.py +443 -0
- backend/tests/test_jobs_api.py +273 -0
- backend/tests/test_local_encoding_service.py +423 -0
- backend/tests/test_local_preview_encoding_service.py +567 -0
- backend/tests/test_main.py +87 -0
- backend/tests/test_models.py +918 -0
- backend/tests/test_packaging_service.py +382 -0
- backend/tests/test_requests.py +201 -0
- backend/tests/test_routes_jobs.py +282 -0
- backend/tests/test_routes_review.py +337 -0
- backend/tests/test_services.py +556 -0
- backend/tests/test_services_extended.py +112 -0
- backend/tests/test_storage_service.py +448 -0
- backend/tests/test_style_upload.py +261 -0
- backend/tests/test_template_service.py +295 -0
- backend/tests/test_theme_service.py +516 -0
- backend/tests/test_unicode_sanitization.py +522 -0
- backend/tests/test_upload_api.py +256 -0
- backend/tests/test_validate.py +156 -0
- backend/tests/test_video_worker_orchestrator.py +847 -0
- backend/tests/test_worker_log_subcollection.py +509 -0
- backend/tests/test_worker_logging.py +365 -0
- backend/tests/test_workers.py +1116 -0
- backend/tests/test_workers_extended.py +178 -0
- backend/tests/test_youtube_service.py +247 -0
- backend/tests/test_youtube_upload_service.py +568 -0
- backend/validate.py +173 -0
- backend/version.py +27 -0
- backend/workers/README.md +597 -0
- backend/workers/__init__.py +11 -0
- backend/workers/audio_worker.py +618 -0
- backend/workers/lyrics_worker.py +683 -0
- backend/workers/render_video_worker.py +483 -0
- backend/workers/screens_worker.py +525 -0
- backend/workers/style_helper.py +198 -0
- backend/workers/video_worker.py +1277 -0
- backend/workers/video_worker_orchestrator.py +701 -0
- backend/workers/worker_logging.py +278 -0
- karaoke_gen/instrumental_review/static/index.html +7 -4
- karaoke_gen/karaoke_finalise/karaoke_finalise.py +6 -1
- karaoke_gen/utils/__init__.py +163 -8
- karaoke_gen/video_background_processor.py +9 -4
- {karaoke_gen-0.90.1.dist-info → karaoke_gen-0.96.0.dist-info}/METADATA +1 -1
- {karaoke_gen-0.90.1.dist-info → karaoke_gen-0.96.0.dist-info}/RECORD +186 -41
- lyrics_transcriber/correction/agentic/providers/config.py +9 -5
- lyrics_transcriber/correction/agentic/providers/langchain_bridge.py +1 -51
- lyrics_transcriber/correction/corrector.py +192 -130
- lyrics_transcriber/correction/operations.py +24 -9
- lyrics_transcriber/frontend/package-lock.json +2 -2
- lyrics_transcriber/frontend/package.json +1 -1
- lyrics_transcriber/frontend/src/components/AIFeedbackModal.tsx +1 -1
- lyrics_transcriber/frontend/src/components/CorrectedWordWithActions.tsx +11 -7
- lyrics_transcriber/frontend/src/components/EditActionBar.tsx +31 -5
- lyrics_transcriber/frontend/src/components/EditModal.tsx +28 -10
- lyrics_transcriber/frontend/src/components/EditTimelineSection.tsx +123 -27
- lyrics_transcriber/frontend/src/components/EditWordList.tsx +112 -60
- lyrics_transcriber/frontend/src/components/Header.tsx +90 -76
- lyrics_transcriber/frontend/src/components/LyricsAnalyzer.tsx +53 -31
- lyrics_transcriber/frontend/src/components/LyricsSynchronizer/SyncControls.tsx +44 -13
- lyrics_transcriber/frontend/src/components/LyricsSynchronizer/TimelineCanvas.tsx +66 -50
- lyrics_transcriber/frontend/src/components/LyricsSynchronizer/index.tsx +124 -30
- lyrics_transcriber/frontend/src/components/ReferenceView.tsx +1 -1
- lyrics_transcriber/frontend/src/components/TimelineEditor.tsx +12 -5
- lyrics_transcriber/frontend/src/components/TimingOffsetModal.tsx +3 -3
- lyrics_transcriber/frontend/src/components/TranscriptionView.tsx +1 -1
- lyrics_transcriber/frontend/src/components/WordDivider.tsx +11 -7
- lyrics_transcriber/frontend/src/components/shared/components/Word.tsx +4 -2
- lyrics_transcriber/frontend/src/hooks/useManualSync.ts +103 -1
- lyrics_transcriber/frontend/src/theme.ts +42 -15
- lyrics_transcriber/frontend/tsconfig.tsbuildinfo +1 -1
- lyrics_transcriber/frontend/vite.config.js +5 -0
- lyrics_transcriber/frontend/web_assets/assets/{index-BECn1o8Q.js → index-BSMgOq4Z.js} +6959 -5782
- lyrics_transcriber/frontend/web_assets/assets/index-BSMgOq4Z.js.map +1 -0
- lyrics_transcriber/frontend/web_assets/index.html +6 -2
- lyrics_transcriber/frontend/web_assets/nomad-karaoke-logo.svg +5 -0
- lyrics_transcriber/output/generator.py +17 -3
- lyrics_transcriber/output/video.py +60 -95
- lyrics_transcriber/frontend/web_assets/assets/index-BECn1o8Q.js.map +0 -1
- {karaoke_gen-0.90.1.dist-info → karaoke_gen-0.96.0.dist-info}/WHEEL +0 -0
- {karaoke_gen-0.90.1.dist-info → karaoke_gen-0.96.0.dist-info}/entry_points.txt +0 -0
- {karaoke_gen-0.90.1.dist-info → karaoke_gen-0.96.0.dist-info}/licenses/LICENSE +0 -0
backend/workers/lyrics_worker.py
@@ -0,0 +1,683 @@
+"""
+Lyrics transcription and correction worker.
+
+Handles the lyrics processing track of parallel processing:
+1. Fetch reference lyrics from multiple sources (Genius, Spotify, Musixmatch, LRCLib)
+2. Transcribe audio with AudioShake API (1-2 min)
+3. Run automatic correction using LyricsTranscriber
+4. Generate corrections JSON for human review
+5. Upload all data to GCS
+6. Transition to AWAITING_REVIEW state
+
+Re-uses:
+- karaoke_gen.lyrics_processor.LyricsProcessor for lyrics fetching and orchestration
+- lyrics_transcriber library (submodule) for transcription and correction
+
+Observability:
+- All operations wrapped in tracing spans for Cloud Trace visibility
+- Logs include [job:ID] prefix for easy filtering in Cloud Logging
+- Worker start/end timing logged with WORKER_START/WORKER_END markers
+"""
+import asyncio
+import logging
+import os
+import shutil
+import tempfile
+import time
+import json
+from typing import Optional, Dict, Any
+from pathlib import Path
+
+from backend.models.job import JobStatus
+from karaoke_gen.utils import sanitize_filename
+from backend.services.job_manager import JobManager
+from backend.services.storage_service import StorageService
+from backend.services.lyrics_cache_service import LyricsCacheService
+from backend.workers.worker_logging import create_job_logger, setup_job_logging, job_logging_context
+from backend.workers.style_helper import load_style_config
+from backend.services.tracing import job_span, add_span_event, add_span_attribute
+from backend.services.metrics import metrics
+
+# Import from karaoke_gen package
+from karaoke_gen.lyrics_processor import LyricsProcessor
+from backend.config import get_settings
+
+
+logger = logging.getLogger(__name__)
+
+# Timeout for entire transcription process including agentic AI correction (20 minutes)
+# This needs to account for:
+# - Cloud Run cold start / worker initialization (1-5 min)
+# - AudioShake transcription (1-2 min)
+# - spaCy model loading for correction (2-3 min on cold start)
+# - Agentic AI correction (1-3 min)
+TRANSCRIPTION_TIMEOUT_SECONDS = 1200
+
+# Default agentic correction timeout (3 minutes)
+# Configurable via AGENTIC_CORRECTION_TIMEOUT_SECONDS environment variable
+# If agentic AI takes longer than this, abort and use uncorrected transcription
+# Human review will correct any issues
+DEFAULT_AGENTIC_TIMEOUT_SECONDS = 180
+
+
+def _configure_agentic_ai():
+    """Configure environment variables for agentic AI correction.
+
+    The lyrics_transcriber library reads these directly from environment.
+    This ensures the settings from backend config are available.
+    """
+    settings = get_settings()
+
+    # Enable agentic AI if configured
+    if settings.use_agentic_ai:
+        os.environ["USE_AGENTIC_AI"] = "1"
+        os.environ["AGENTIC_AI_MODEL"] = settings.agentic_ai_model
+        logger.info(f"Agentic AI enabled with model: {settings.agentic_ai_model}")
+    else:
+        os.environ["USE_AGENTIC_AI"] = "0"
+        logger.info("Agentic AI disabled")
+
+
+# Loggers to capture for lyrics worker (top-level package loggers only)
+# Note: We only capture at top-level package loggers to avoid duplicate log entries.
+# Logs from sub-modules (e.g., lyrics_transcriber.correction.anchor_sequence) will
+# propagate up to their parent logger (lyrics_transcriber) where they get captured.
+LYRICS_WORKER_LOGGERS = [
+    "karaoke_gen.lyrics_processor",
+    "lyrics_transcriber",
+]
+
+
+def create_lyrics_processor(
+    style_params_json: Optional[str] = None,
+    lyrics_file: Optional[str] = None,
+    subtitle_offset_ms: int = 0
+) -> LyricsProcessor:
+    """
+    Create a LyricsProcessor instance configured for Cloud Run processing.
+
+    This reuses the karaoke_gen LyricsProcessor with settings optimized for Cloud Run:
+    - Uses AudioShake API for transcription (via AUDIOSHAKE_API_TOKEN env var)
+    - Uses Genius/Spotify/Musixmatch APIs for reference lyrics (via env vars)
+    - Skips interactive review (will be handled by separate React UI)
+    - Generates corrections JSON for review interface
+
+    Args:
+        style_params_json: Optional path to style parameters JSON file
+        lyrics_file: Optional path to user-provided lyrics file (overrides API fetch)
+        subtitle_offset_ms: Offset for subtitle timing in milliseconds
+
+    Returns:
+        Configured LyricsProcessor instance
+    """
+    # Configure logger for LyricsProcessor
+    lyrics_logger = logging.getLogger("karaoke_gen.lyrics_processor")
+    lyrics_logger.setLevel(logging.INFO)
+
+    return LyricsProcessor(
+        logger=lyrics_logger,
+        style_params_json=style_params_json,
+        lyrics_file=lyrics_file,  # Use user-provided lyrics if available
+        skip_transcription=False,  # We want transcription
+        skip_transcription_review=True,  # Skip interactive review (use React UI instead)
+        render_video=False,  # Skip video generation for now (will be done after review)
+        subtitle_offset_ms=subtitle_offset_ms
+    )
+
+
+async def process_lyrics_transcription(job_id: str) -> bool:
+    """
+    Process lyrics transcription and correction for a job using karaoke_gen.LyricsProcessor.
+
+    This is the main entry point for the lyrics worker.
+    Called asynchronously from the job submission endpoint.
+
+    Runs in parallel with audio_worker, coordinated via job state.
+
+    Workflow:
+    1. Download audio from GCS
+    2. Create LyricsProcessor instance
+    3. Run transcription (calls AudioShake API internally)
+    4. Fetch reference lyrics from Genius/Spotify/Musixmatch/LRCLib
+    5. Run automatic correction using lyrics_transcriber library
+    6. Generate corrections JSON for review interface
+    7. Upload results to GCS
+    8. Transition to AWAITING_REVIEW state
+
+    Args:
+        job_id: Job ID to process
+
+    Returns:
+        True if successful, False otherwise
+    """
+    start_time = time.time()
+    job_manager = JobManager()
+    storage = StorageService()
+
+    # Configure agentic AI before any lyrics processing
+    _configure_agentic_ai()
+
+    # Create job logger for remote debugging FIRST
+    job_log = create_job_logger(job_id, "lyrics")
+
+    # Log with structured markers for easy Cloud Logging queries
+    logger.info(f"[job:{job_id}] WORKER_START worker=lyrics")
+    job_log.info("=== LYRICS WORKER STARTED ===")
+    job_log.info(f"Job ID: {job_id}")
+
+    # Set up log capture for LyricsTranscriber and its dependencies
+    # This ensures logs from the lyrics_transcriber library are also captured
+    log_handler = setup_job_logging(job_id, "lyrics", *LYRICS_WORKER_LOGGERS)
+    job_log.info(f"Log handler attached for {len(LYRICS_WORKER_LOGGERS)} loggers")
+
+    # Initialize temp_dir before try block so finally can check it
+    temp_dir = None
+
+    try:
+        # Wrap entire worker in a tracing span
+        with job_span("lyrics-worker", job_id) as root_span:
+            # Use job_logging_context for proper log isolation when multiple jobs run concurrently
+            # This ensures logs from third-party libraries (lyrics_transcriber) are only captured
+            # by this job's handler, not handlers from other concurrent jobs
+            with job_logging_context(job_id):
+                job = job_manager.get_job(job_id)
+                if not job:
+                    logger.error(f"[job:{job_id}] Job not found in Firestore")
+                    job_log.error(f"Job {job_id} not found in Firestore!")
+                    return False
+
+                add_span_attribute("artist", job.artist)
+                add_span_attribute("title", job.title)
+
+                # Create temporary working directory
+                temp_dir = tempfile.mkdtemp(prefix=f"karaoke_lyrics_{job_id}_")
+                job_log.info(f"Created temp directory: {temp_dir}")
+                job_log.info(f"Starting lyrics transcription for {job.artist} - {job.title}")
+                job_log.info(f"Log capture enabled for: {', '.join(LYRICS_WORKER_LOGGERS)}")
+                logger.info(f"[job:{job_id}] Starting lyrics transcription for {job.artist} - {job.title}")
+
+                # Log environment configuration
+                with job_span("check-api-config", job_id):
+                    job_log.info("Checking API configuration...")
+                    apis_configured = []
+                    if os.environ.get("GENIUS_API_TOKEN"):
+                        job_log.info(" Genius API: configured")
+                        apis_configured.append("genius")
+                    else:
+                        job_log.warning(" Genius API: NOT configured")
+                    if os.environ.get("SPOTIFY_COOKIE_SP_DC"):
+                        job_log.info(" Spotify: configured")
+                        apis_configured.append("spotify")
+                    else:
+                        job_log.warning(" Spotify: NOT configured")
+                    if os.environ.get("RAPIDAPI_KEY"):
+                        job_log.info(" Musixmatch (RapidAPI): configured")
+                        apis_configured.append("musixmatch")
+                    else:
+                        job_log.warning(" Musixmatch (RapidAPI): NOT configured")
+                    add_span_attribute("lyrics_apis", ",".join(apis_configured))
+
+                # Ensure required environment variables are set
+                if not os.environ.get("AUDIOSHAKE_API_TOKEN"):
+                    job_log.warning("AUDIOSHAKE_API_TOKEN not set - transcription may fail")
+                    logger.warning(f"[job:{job_id}] AUDIOSHAKE_API_TOKEN not set - transcription may fail")
+
+                # Download audio file from GCS (waits for audio worker if URL job)
+                with job_span("download-audio", job_id) as download_span:
+                    job_log.info("Downloading audio file from GCS...")
+                    audio_path = await download_audio(job_id, temp_dir, storage, job, job_manager)
+                    if not audio_path:
+                        raise Exception("Failed to download audio file")
+                    job_log.info(f"Audio downloaded: {os.path.basename(audio_path)}")
+                    download_span.set_attribute("audio_file", os.path.basename(audio_path))
+
+                # Set up LyricsTranscriber cache directory and sync from GCS
+                # This allows reusing cached AudioShake/lyrics API responses across Cloud Run instances
+                cache_dir = os.path.join(temp_dir, "lyrics-cache")
+                os.makedirs(cache_dir, exist_ok=True)
+                os.environ["LYRICS_TRANSCRIBER_CACHE_DIR"] = cache_dir
+                job_log.info(f"LyricsTranscriber cache dir: {cache_dir}")
+
+                # Sync cache from GCS (download any existing cached API responses)
+                # Note: Cache sync is best-effort - failures should not fail the job
+                cache_service = LyricsCacheService(storage)
+                audio_hash = None
+                lyrics_hash = None
+                with job_span("sync-cache-from-gcs", job_id):
+                    try:
+                        # Compute cache keys
+                        audio_hash = cache_service.compute_audio_hash(audio_path)
+                        # Use lyrics_artist/lyrics_title for lyrics hash (these are what LyricsTranscriber uses)
+                        lyrics_search_artist = getattr(job, 'lyrics_artist', None) or job.artist
+                        lyrics_search_title = getattr(job, 'lyrics_title', None) or job.title
+                        lyrics_hash = cache_service.compute_lyrics_hash(lyrics_search_artist, lyrics_search_title)
+
+                        job_log.info(f"Cache keys: audio_hash={audio_hash[:8]}..., lyrics_hash={lyrics_hash[:8]}...")
+                        add_span_attribute("audio_hash", audio_hash[:8])
+                        add_span_attribute("lyrics_hash", lyrics_hash[:8])
+
+                        # Download relevant cache files from GCS
+                        cache_stats = cache_service.sync_cache_from_gcs(cache_dir, audio_hash, lyrics_hash)
+                        job_log.info(f"Cache sync from GCS: {cache_stats['downloaded']} downloaded, {cache_stats['not_found']} not found")
+                        add_span_attribute("cache_hits", cache_stats["downloaded"])
+                    except Exception as e:
+                        job_log.warning(f"Cache sync from GCS failed (non-fatal): {e}", exc_info=True)
+                        add_span_attribute("cache_sync_error", str(e)[:100])
+
+                # Update progress using state_data (don't change status during parallel processing)
+                # The status is managed at a higher level - workers just track their progress
+                job_manager.update_state_data(job_id, 'lyrics_progress', {
+                    'stage': 'transcribing',
+                    'progress': 10,
+                    'message': 'Starting lyrics transcription via AudioShake'
+                })
+
+                # Load style configuration (downloads assets from GCS if available)
+                # This is needed for max_line_length and other style settings in LyricsTranscriber
+                with job_span("load-style-config", job_id):
+                    job_log.info("Loading style configuration for lyrics processing...")
+                    style_config = await load_style_config(job, storage, temp_dir)
+                    style_params_json_path = style_config.get_style_params_path() if style_config.has_custom_styles() else None
+
+                    if style_params_json_path:
+                        job_log.info(f"Using custom style params: {style_params_json_path}")
+                        # Log the contents of the style params for debugging
+                        try:
+                            with open(style_params_json_path, 'r') as f:
+                                style_content = json.load(f)
+                            job_log.info(f"Style params sections: {list(style_content.keys())}")
+                            if 'karaoke' in style_content:
+                                karaoke_style = style_content['karaoke']
+                                job_log.info(f" karaoke.background_image: {karaoke_style.get('background_image', 'NOT SET')}")
+                                job_log.info(f" karaoke.font_path: {karaoke_style.get('font_path', 'NOT SET')}")
+                        except Exception as e:
+                            job_log.warning(f"Could not read style params for logging: {e}")
+                    else:
+                        job_log.info("No custom style params found, using defaults")
+
+                # Download user-provided lyrics file if available
+                lyrics_file_path = None
+                if hasattr(job, 'lyrics_file_gcs_path') and job.lyrics_file_gcs_path:
+                    with job_span("download-user-lyrics", job_id):
+                        lyrics_file_path = os.path.join(temp_dir, "user_lyrics.txt")
+                        job_log.info(f"Downloading user-provided lyrics file: {job.lyrics_file_gcs_path}")
+                        storage.download_file(job.lyrics_file_gcs_path, lyrics_file_path)
+                        job_log.info(f"User lyrics file downloaded to: {lyrics_file_path}")
+
+                # Get lyrics configuration from job
+                subtitle_offset = getattr(job, 'subtitle_offset_ms', 0) or 0
+                if subtitle_offset != 0:
+                    job_log.info(f"Subtitle offset: {subtitle_offset}ms")
+                    add_span_attribute("subtitle_offset_ms", subtitle_offset)
+
+                # Create LyricsProcessor instance (reuses karaoke_gen code)
+                job_log.info("Creating LyricsProcessor instance...")
+                job_log.info(f" style_params_json: {style_params_json_path}")
+                job_log.info(f" lyrics_file: {lyrics_file_path}")
+                job_log.info(f" subtitle_offset_ms: {subtitle_offset}")
+                lyrics_processor = create_lyrics_processor(
+                    style_params_json=style_params_json_path,
+                    lyrics_file=lyrics_file_path,
+                    subtitle_offset_ms=subtitle_offset
+                )
+
+                # Use lyrics_artist/lyrics_title overrides if provided, else fall back to job artist/title
+                lyrics_search_artist = getattr(job, 'lyrics_artist', None) or job.artist
+                lyrics_search_title = getattr(job, 'lyrics_title', None) or job.title
+
+                job_log.info("Starting LyricsTranscriber processing...")
+                job_log.info(f" Artist: {job.artist}")
+                job_log.info(f" Title: {job.title}")
+                if lyrics_search_artist != job.artist:
+                    job_log.info(f" Lyrics search artist override: {lyrics_search_artist}")
+                if lyrics_search_title != job.title:
+                    job_log.info(f" Lyrics search title override: {lyrics_search_title}")
+                logger.info(f"[job:{job_id}] Calling lyrics_processor.transcribe_lyrics()")
+
+                # Run transcription + correction with timeout
+                # AudioShake typically takes 1-2 minutes, but can hang.
+                with job_span("audioshake-transcription", job_id) as trans_span:
+                    trans_start = time.time()
+                    add_span_event("transcription_started")
+
+                    # Calculate deadline for agentic correction
+                    # If agentic AI is enabled, the correction loop will check this deadline
+                    # and abort early if exceeded, returning uncorrected transcription for human review
+                    settings = get_settings()
+                    agentic_deadline = None
+                    timeout_seconds = settings.agentic_correction_timeout_seconds
+                    if settings.use_agentic_ai:
+                        agentic_deadline = time.time() + timeout_seconds
+                        job_log.info(
+                            f"Agentic AI enabled with {timeout_seconds}s timeout"
+                        )
+
+                    # Run transcription in thread pool
+                    # When agentic AI is enabled, wrap with outer timeout as safety net
+                    # Inner deadline check in corrector.py will break out of gap loop gracefully
+                    # Outer timeout catches case where single LLM call hangs for too long
+                    transcription_coro = asyncio.to_thread(
+                        lyrics_processor.transcribe_lyrics,
+                        input_audio_wav=audio_path,
+                        artist=job.artist,  # Original artist for file naming
+                        title=job.title,  # Original title for file naming
+                        track_output_dir=temp_dir,
+                        lyrics_artist=lyrics_search_artist,  # Override for lyrics search
+                        lyrics_title=lyrics_search_title,  # Override for lyrics search
+                        agentic_deadline=agentic_deadline,  # Deadline for agentic timeout
+                    )
+
+                    with metrics.time_external_api("audioshake", job_id):
+                        if settings.use_agentic_ai:
+                            # Outer timeout must be generous to allow for AudioShake time
+                            # (can take 2-5+ minutes for long songs) PLUS agentic correction.
+                            # The inner deadline check in corrector.py handles the 3-minute
+                            # correction limit gracefully by returning partial results.
+                            # This outer timeout is a safety net for completely hung LLM calls.
+                            outer_timeout = TRANSCRIPTION_TIMEOUT_SECONDS  # Same as non-agentic
+                            try:
+                                result = await asyncio.wait_for(
+                                    transcription_coro,
+                                    timeout=outer_timeout
+                                )
+                            except asyncio.TimeoutError:
+                                # This should rarely trigger since inner deadline check breaks first
+                                # But provides hard stop if e.g. single LLM call hangs completely
+                                job_log.error(
+                                    f"HARD TIMEOUT: Transcription exceeded {outer_timeout}s. "
+                                    "This indicates the inner deadline check failed to trigger."
+                                )
+                                raise RuntimeError(
+                                    f"Lyrics transcription timed out after {outer_timeout}s"
+                                ) from None
+                        else:
+                            # Non-agentic mode: use general timeout (10 minutes)
+                            try:
+                                result = await asyncio.wait_for(
+                                    transcription_coro,
+                                    timeout=TRANSCRIPTION_TIMEOUT_SECONDS
+                                )
+                            except asyncio.TimeoutError:
+                                raise Exception(f"Transcription timed out after {TRANSCRIPTION_TIMEOUT_SECONDS} seconds")
+
+                    trans_duration = time.time() - trans_start
+                    trans_span.set_attribute("duration_seconds", trans_duration)
+                    add_span_event("transcription_completed", {"duration_seconds": trans_duration})
+
+                job_log.info("Transcription processing complete")
+                logger.info(f"[job:{job_id}] Transcription complete, uploading results")
+
+                # Upload lyrics results to GCS
+                with job_span("upload-lyrics-results", job_id):
+                    job_log.info("Uploading lyrics results to GCS...")
+                    await upload_lyrics_results(job_id, temp_dir, result, storage, job_manager, job_log)
+
+                job_log.info("All lyrics data uploaded successfully")
+                logger.info(f"[job:{job_id}] All lyrics data uploaded successfully")
+
+                # Sync new cache files to GCS (upload any newly created cache entries)
+                # This persists cache across Cloud Run instances for future jobs
+                # Note: Cache upload is best-effort - failures should not fail the job
+                with job_span("sync-cache-to-gcs", job_id):
+                    try:
+                        if audio_hash and lyrics_hash:
+                            upload_stats = cache_service.sync_cache_to_gcs(cache_dir, audio_hash, lyrics_hash)
+                            job_log.info(f"Cache sync to GCS: {upload_stats['uploaded']} uploaded, {upload_stats['skipped']} already existed")
+                            add_span_attribute("cache_uploads", upload_stats["uploaded"])
+                        else:
+                            job_log.warning("Skipping cache upload: missing audio_hash or lyrics_hash")
+                            add_span_attribute("cache_upload_skipped", "missing_hashes")
+                    except Exception as e:
+                        job_log.warning(f"Cache upload to GCS failed (non-fatal): {e}")
+                        add_span_attribute("cache_upload_error", str(e)[:100])
+
+                # Update progress using state_data (don't change status during parallel processing)
+                job_manager.update_state_data(job_id, 'lyrics_progress', {
+                    'stage': 'lyrics_complete',
+                    'progress': 45,
+                    'message': 'Lyrics transcription complete'
+                })
+
+                # Mark lyrics processing complete
+                # This will check if audio is also complete and transition to next stage if so
+                job_log.info("Lyrics worker complete, checking if audio is also done...")
+                job_manager.mark_lyrics_complete(job_id)
+
+                duration = time.time() - start_time
+                root_span.set_attribute("duration_seconds", duration)
+                logger.info(f"[job:{job_id}] WORKER_END worker=lyrics status=success duration={duration:.1f}s")
+                return True
+
+    except Exception as e:
+        duration = time.time() - start_time
+        job_log.error(f"Lyrics transcription failed: {str(e)}", exc_info=True)
+        logger.error(f"[job:{job_id}] WORKER_END worker=lyrics status=error duration={duration:.1f}s error={e}")
+        job_manager.mark_job_failed(
+            job_id=job_id,
+            error_message=f"Lyrics transcription failed: {str(e)}",
+            error_details={"stage": "lyrics_transcription", "error": str(e)}
+        )
+        return False
+
+    finally:
+        # Remove log handler to avoid duplicate logging on future runs
+        for logger_name in LYRICS_WORKER_LOGGERS:
+            try:
+                logging.getLogger(logger_name).removeHandler(log_handler)
+            except Exception:
+                pass
+
+        # Cleanup temporary directory (only if it was created)
+        if temp_dir and os.path.exists(temp_dir):
+            shutil.rmtree(temp_dir)
+            logger.debug(f"[job:{job_id}] Cleaned up temp directory: {temp_dir}")
+
+
+async def download_audio(
+    job_id: str,
+    temp_dir: str,
+    storage: StorageService,
+    job,
+    job_manager: JobManager,
+    max_wait_seconds: int = 300
+) -> Optional[str]:
+    """
+    Download audio file from GCS to local temp directory.
+
+    For URL jobs, the audio worker downloads from YouTube first and uploads to GCS.
+    We wait for the input_media_gcs_path to be set by the audio worker.
+
+    Args:
+        job_id: Job ID
+        temp_dir: Temporary directory for download
+        storage: StorageService instance
+        job: Job object
+        job_manager: JobManager instance
+        max_wait_seconds: Maximum time to wait for audio worker to download
+
+    Returns:
+        Path to downloaded audio file, or None if failed
+    """
+    import time
+
+    try:
+        # If input_media_gcs_path is already set, download directly
+        if job.input_media_gcs_path:
+            local_path = os.path.join(temp_dir, job.filename or "input.flac")
+            storage.download_file(job.input_media_gcs_path, local_path)
+            logger.info(f"Job {job_id}: Downloaded audio from {job.input_media_gcs_path} to {local_path}")
+            return local_path
+
+        # For URL jobs, wait for audio worker to download and upload to GCS
+        if job.url:
+            logger.info(f"Job {job_id}: Waiting for audio worker to download from URL...")
+
+            start_time = time.time()
+            poll_interval = 5  # seconds
+
+            while time.time() - start_time < max_wait_seconds:
+                # Refresh job from Firestore
+                updated_job = job_manager.get_job(job_id)
+
+                if updated_job and updated_job.input_media_gcs_path:
+                    # Audio worker has uploaded the file
+                    local_path = os.path.join(temp_dir, "input.flac")
+                    storage.download_file(updated_job.input_media_gcs_path, local_path)
+                    logger.info(f"Job {job_id}: Downloaded audio from {updated_job.input_media_gcs_path}")
+                    return local_path
+
+                # Check if audio worker failed
+                if updated_job and updated_job.status == JobStatus.FAILED:
+                    logger.error(f"Job {job_id}: Audio worker failed, cannot proceed with lyrics")
+                    return None
+
+                # Wait before next poll - use asyncio.sleep to not block event loop
+                await asyncio.sleep(poll_interval)
+
+            logger.error(f"Job {job_id}: Timed out waiting for audio download (waited {max_wait_seconds}s)")
+            return None
+
+        logger.error(f"Job {job_id}: No input_media_gcs_path found and no URL")
+        return None
+
+    except Exception as e:
+        logger.error(f"Job {job_id}: Failed to download audio: {e}", exc_info=True)
+        return None
+
+
+async def upload_lyrics_results(
+    job_id: str,
+    temp_dir: str,
+    transcription_result: Dict[str, Any],
+    storage: StorageService,
+    job_manager: JobManager,
+    job_log = None
+) -> None:
+    """
+    Upload all lyrics transcription results to GCS.
+
+    The transcription_result dict from LyricsProcessor.transcribe_lyrics() contains:
+    - lrc_filepath: Path to LRC file (timed lyrics)
+    - ass_filepath: Path to ASS file (karaoke subtitles, may be video file)
+
+    Additional files in lyrics directory:
+    - Corrections JSON (for review interface)
+    - Reference lyrics (from Genius/Spotify/Musixmatch/LRCLib)
+    - Uncorrected transcription
+
+    Args:
+        job_id: Job ID
+        temp_dir: Temporary directory with results
+        transcription_result: Result dict from LyricsProcessor
+        storage: StorageService instance
+        job_manager: JobManager instance
+        job_log: Optional JobLogger for remote debugging
+    """
+    logger.info(f"Job {job_id}: Uploading lyrics results to GCS")
+
+    # Get job object for artist/title
+    job = job_manager.get_job(job_id)
+
+    lyrics_dir = os.path.join(temp_dir, "lyrics")
+
+    # Upload LRC file (timed lyrics)
+    if transcription_result.get("lrc_filepath") and os.path.exists(transcription_result["lrc_filepath"]):
+        gcs_path = f"jobs/{job_id}/lyrics/karaoke.lrc"
+        url = storage.upload_file(transcription_result["lrc_filepath"], gcs_path)
+        job_manager.update_file_url(job_id, 'lyrics', 'lrc', url)
+        logger.info(f"Job {job_id}: Uploaded LRC file")
+
+    # Upload corrections JSON (for review interface)
+    # LyricsProcessor saves it as "{artist} - {title} (Lyrics Corrections).json"
+    # Sanitize artist/title to prevent path injection and match LyricsProcessor's sanitization
+    safe_artist = sanitize_filename(job.artist) if job.artist else "Unknown"
+    safe_title = sanitize_filename(job.title) if job.title else "Unknown"
+    corrections_filename = f"{safe_artist} - {safe_title} (Lyrics Corrections).json"
+    corrections_file = os.path.join(lyrics_dir, corrections_filename)
+
+    # Also check for generic corrections.json (fallback)
+    if not os.path.exists(corrections_file):
+        corrections_file = os.path.join(lyrics_dir, "corrections.json")
+
+    if os.path.exists(corrections_file):
+        # Always upload as corrections.json for consistent access
+        gcs_path = f"jobs/{job_id}/lyrics/corrections.json"
+        url = storage.upload_file(corrections_file, gcs_path)
+        job_manager.update_file_url(job_id, 'lyrics', 'corrections', url)
+        logger.info(f"Job {job_id}: Uploaded corrections JSON from {corrections_file}")
+
+        # Load corrections to get metadata
+        try:
+            with open(corrections_file, 'r', encoding='utf-8') as f:
+                corrections_data = json.load(f)
+
+            # Store metadata in state_data
+            # Include countdown padding info for instrumental synchronization
+            lyrics_metadata = {
+                'segment_count': len(corrections_data.get('corrected_segments', [])),
+                'has_corrections': True,
+                'ready_for_review': True,
+            }
+
+            # Add countdown padding info from transcription result
+            # This is needed by video_worker to pad instrumentals for sync
+            if transcription_result.get("countdown_padding_added"):
+                lyrics_metadata['has_countdown_padding'] = True
+                lyrics_metadata['countdown_padding_seconds'] = transcription_result.get("countdown_padding_seconds", 3.0)
+                logger.info(f"Job {job_id}: Countdown padding detected: {lyrics_metadata['countdown_padding_seconds']}s")
+            else:
+                lyrics_metadata['has_countdown_padding'] = False
+                lyrics_metadata['countdown_padding_seconds'] = 0.0
+
+            job_manager.update_state_data(job_id, 'lyrics_metadata', lyrics_metadata)
+        except Exception as e:
+            logger.warning(f"Job {job_id}: Could not parse corrections JSON: {e}")
+    else:
+        # CRITICAL: corrections.json is required for the review UI
+        # If it's missing, the job cannot proceed to review
+        error_msg = f"No corrections JSON found at {corrections_file}. Transcription may have produced no lyrics."
+        logger.error(f"Job {job_id}: {error_msg}")
+        raise Exception(error_msg)
+
+    # Upload ALL reference lyrics files (not just first) so they're available for distribution
+    # Note: Source names use .title() so "lrclib" -> "Lrclib", "genius" -> "Genius"
+    # Use sanitized artist/title to match LyricsProcessor's file naming
+    reference_files = [
+        (f"{safe_artist} - {safe_title} (Lyrics Genius).txt", "genius"),
+        (f"{safe_artist} - {safe_title} (Lyrics Spotify).txt", "spotify"),
+        (f"{safe_artist} - {safe_title} (Lyrics Musixmatch).txt", "musixmatch"),
+        (f"{safe_artist} - {safe_title} (Lyrics Lrclib).txt", "lrclib"),
+    ]
+
+    found_references = []
+    for ref_filename, source_key in reference_files:
+        ref_path = os.path.join(lyrics_dir, ref_filename)
+        if os.path.exists(ref_path):
+            # Upload with original filename to preserve proper naming
+            gcs_path = f"jobs/{job_id}/lyrics/{ref_filename}"
+            url = storage.upload_file(ref_path, gcs_path)
+            # Track in job.files so video_worker can download for distribution
+            job_manager.update_file_url(job_id, 'lyrics', f'reference_{source_key}', url)
+            if job_log:
+                job_log.info(f"Found reference lyrics from {source_key}")
+            logger.info(f"Job {job_id}: Uploaded reference lyrics: {ref_filename}")
+            found_references.append(source_key)
+
+    if not found_references:
+        if job_log:
+            job_log.warning("No reference lyrics found from any source (Genius, Spotify, Musixmatch, LRCLib)")
+        logger.warning(f"Job {job_id}: No reference lyrics found from any source")
+    else:
+        logger.info(f"Job {job_id}: Found {len(found_references)} reference lyrics sources: {found_references}")
+
+    # Upload uncorrected transcription if available (preserve original filename for distribution)
+    uncorrected_filename = f"{safe_artist} - {safe_title} (Lyrics Uncorrected).txt"
+    uncorrected_file = os.path.join(lyrics_dir, uncorrected_filename)
+    if os.path.exists(uncorrected_file):
+        gcs_path = f"jobs/{job_id}/lyrics/{uncorrected_filename}"
+        url = storage.upload_file(uncorrected_file, gcs_path)
+        job_manager.update_file_url(job_id, 'lyrics', 'uncorrected', url)
+        logger.info(f"Job {job_id}: Uploaded uncorrected transcription: {uncorrected_filename}")
+
+    logger.info(f"Job {job_id}: All lyrics results uploaded successfully")
+