karaoke-gen 0.96.0__py3-none-any.whl → 0.99.3__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in their public registries.
Files changed (30)
  1. backend/api/routes/admin.py +184 -91
  2. backend/api/routes/audio_search.py +16 -6
  3. backend/api/routes/file_upload.py +57 -21
  4. backend/api/routes/health.py +65 -0
  5. backend/api/routes/jobs.py +19 -0
  6. backend/api/routes/users.py +543 -44
  7. backend/main.py +25 -1
  8. backend/services/encoding_service.py +128 -31
  9. backend/services/job_manager.py +12 -1
  10. backend/services/langfuse_preloader.py +98 -0
  11. backend/services/nltk_preloader.py +122 -0
  12. backend/services/spacy_preloader.py +65 -0
  13. backend/services/stripe_service.py +96 -0
  14. backend/tests/emulator/conftest.py +22 -1
  15. backend/tests/test_job_manager.py +25 -8
  16. backend/tests/test_jobs_api.py +11 -1
  17. backend/tests/test_spacy_preloader.py +119 -0
  18. backend/utils/test_data.py +27 -0
  19. backend/workers/screens_worker.py +16 -6
  20. {karaoke_gen-0.96.0.dist-info → karaoke_gen-0.99.3.dist-info}/METADATA +1 -1
  21. {karaoke_gen-0.96.0.dist-info → karaoke_gen-0.99.3.dist-info}/RECORD +30 -25
  22. lyrics_transcriber/correction/agentic/agent.py +17 -6
  23. lyrics_transcriber/correction/agentic/providers/langchain_bridge.py +96 -43
  24. lyrics_transcriber/correction/agentic/providers/model_factory.py +27 -6
  25. lyrics_transcriber/correction/anchor_sequence.py +151 -37
  26. lyrics_transcriber/correction/handlers/syllables_match.py +44 -2
  27. lyrics_transcriber/correction/phrase_analyzer.py +18 -0
  28. {karaoke_gen-0.96.0.dist-info → karaoke_gen-0.99.3.dist-info}/WHEEL +0 -0
  29. {karaoke_gen-0.96.0.dist-info → karaoke_gen-0.99.3.dist-info}/entry_points.txt +0 -0
  30. {karaoke_gen-0.96.0.dist-info → karaoke_gen-0.99.3.dist-info}/licenses/LICENSE +0 -0
@@ -13,6 +13,7 @@ from __future__ import annotations
 
 import logging
 import os
+import threading
 import time
 from concurrent.futures import ThreadPoolExecutor, TimeoutError as FuturesTimeoutError
 from typing import List, Dict, Any, Optional
@@ -94,10 +95,80 @@ class LangChainBridge(BaseAIProvider):
             cache_dir=self._config.cache_dir,
             enabled=cache_enabled
         )
-
-        # Lazy-initialized chat model
+
+        # Lazy-initialized chat model with thread-safe initialization
+        # Lock prevents race condition where multiple threads try to initialize simultaneously
         self._chat_model: Optional[Any] = None
-
+        self._model_init_lock = threading.Lock()
+
+    def warmup(self) -> bool:
+        """Eagerly initialize the chat model.
+
+        Call this after creating the bridge to avoid lazy initialization delays
+        when multiple threads call generate_correction_proposals() simultaneously.
+
+        Returns:
+            True if model was initialized successfully, False otherwise
+        """
+        if self._chat_model is not None:
+            logger.debug(f"🤖 Model {self._model} already initialized")
+            return True
+
+        logger.info(f"🤖 Warming up model {self._model}...")
+        # Trigger initialization by calling the initialization logic directly
+        try:
+            self._ensure_model_initialized()
+            return self._chat_model is not None
+        except Exception as e:
+            logger.error(f"🤖 Warmup failed for {self._model}: {e}")
+            return False
+
+    def _ensure_model_initialized(self) -> None:
+        """Ensure the chat model is initialized (thread-safe).
+
+        This method handles the lazy initialization with proper locking.
+        It's separated out so it can be called from both warmup() and
+        generate_correction_proposals().
+        """
+        if self._chat_model is not None:
+            return
+
+        with self._model_init_lock:
+            # Double-check after acquiring lock
+            if self._chat_model is not None:
+                return
+
+            timeout = self._config.initialization_timeout_seconds
+            logger.info(f"🤖 Initializing model {self._model} with {timeout}s timeout...")
+            init_start = time.time()
+
+            try:
+                # Use ThreadPoolExecutor for cross-platform timeout
+                with ThreadPoolExecutor(max_workers=1) as executor:
+                    future = executor.submit(
+                        self._factory.create_chat_model,
+                        self._model,
+                        self._config
+                    )
+                    try:
+                        self._chat_model = future.result(timeout=timeout)
+                    except FuturesTimeoutError:
+                        raise InitializationTimeoutError(
+                            f"Model initialization timed out after {timeout}s. "
+                            f"This may indicate network issues or service unavailability."
+                        ) from None
+
+                init_elapsed = time.time() - init_start
+                logger.info(f"🤖 Model initialized in {init_elapsed:.2f}s")
+
+            except InitializationTimeoutError:
+                self._circuit_breaker.record_failure(self._model)
+                raise
+            except Exception as e:
+                self._circuit_breaker.record_failure(self._model)
+                logger.error(f"🤖 Failed to initialize chat model: {e}")
+                raise
+
     def name(self) -> str:
         """Return provider name for logging."""
         return f"langchain:{self._model}"
@@ -140,46 +211,28 @@ class LangChainBridge(BaseAIProvider):
                 "until": open_until
             }]
 
-        # Step 2: Get or create chat model with initialization timeout
-        if not self._chat_model:
-            timeout = self._config.initialization_timeout_seconds
-            logger.info(f"🤖 Initializing model {self._model} with {timeout}s timeout...")
-            init_start = time.time()
-
-            try:
-                # Use ThreadPoolExecutor for cross-platform timeout
-                with ThreadPoolExecutor(max_workers=1) as executor:
-                    future = executor.submit(
-                        self._factory.create_chat_model,
-                        self._model,
-                        self._config
-                    )
-                    try:
-                        self._chat_model = future.result(timeout=timeout)
-                    except FuturesTimeoutError:
-                        raise InitializationTimeoutError(
-                            f"Model initialization timed out after {timeout}s. "
-                            f"This may indicate network issues or service unavailability."
-                        ) from None
-
-                init_elapsed = time.time() - init_start
-                logger.info(f"🤖 Model initialized in {init_elapsed:.2f}s")
-
-            except InitializationTimeoutError as e:
-                self._circuit_breaker.record_failure(self._model)
-                logger.exception("🤖 Model initialization timeout")
-                return [{
-                    "error": INIT_TIMEOUT_ERROR,
-                    "message": str(e),
-                    "timeout_seconds": timeout
-                }]
-            except Exception as e:
-                self._circuit_breaker.record_failure(self._model)
-                logger.error(f"🤖 Failed to initialize chat model: {e}")
-                return [{
-                    "error": MODEL_INIT_ERROR,
-                    "message": str(e)
-                }]
+        # Step 2: Get or create chat model with thread-safe initialization
+        # Use double-checked locking to avoid race condition where multiple threads
+        # all try to initialize the model simultaneously (which caused job 2ccbdf6b
+        # to have 5 concurrent model initializations and 6+ minute delays)
+        #
+        # NOTE: For best performance, call warmup() after creating the bridge to
+        # eagerly initialize the model before parallel processing begins.
+        try:
+            self._ensure_model_initialized()
+        except InitializationTimeoutError as e:
+            logger.exception("🤖 Model initialization timeout")
+            return [{
+                "error": INIT_TIMEOUT_ERROR,
+                "message": str(e),
+                "timeout_seconds": self._config.initialization_timeout_seconds
+            }]
+        except Exception as e:
+            logger.error(f"🤖 Failed to initialize chat model: {e}")
+            return [{
+                "error": MODEL_INIT_ERROR,
+                "message": str(e)
+            }]
 
         # Step 3: Execute with retry logic
         logger.info(
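
The comment above names double-checked locking; reduced to a self-contained sketch (illustrative class, not the shipped code), the idiom is:

    import threading

    class LazyResource:
        """Minimal double-checked locking, the idiom _ensure_model_initialized() uses."""

        def __init__(self):
            self._resource = None
            self._lock = threading.Lock()

        def get(self):
            if self._resource is not None:  # first check: lock-free fast path
                return self._resource
            with self._lock:
                if self._resource is None:  # second check: only one thread builds it
                    self._resource = self._build()
                return self._resource

        def _build(self):
            return object()  # stand-in for the expensive model construction

The unlocked first check keeps the steady-state path free of lock contention; the second check, inside the lock, is what stops two threads that both saw None from initializing twice.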
@@ -10,6 +10,14 @@ from .config import ProviderConfig
 
 logger = logging.getLogger(__name__)
 
+# Try to import Langfuse preloader (may not exist in standalone library usage)
+try:
+    from backend.services.langfuse_preloader import get_preloaded_langfuse_handler
+
+    _HAS_LANGFUSE_PRELOADER = True
+except ImportError:
+    _HAS_LANGFUSE_PRELOADER = False
+
 # Error message constant for TRY003 compliance
 GOOGLE_API_KEY_MISSING_ERROR = (
     "GOOGLE_API_KEY environment variable is required for Google/Gemini models. "
@@ -87,25 +95,38 @@ class ModelFactory:
 
     def _initialize_langfuse(self, model_spec: str) -> None:
         """Initialize Langfuse callback handler if keys are present.
-
+
+        First tries to use a preloaded handler (to avoid 200+ second init delay
+        on Cloud Run cold starts), then falls back to creating a new one.
+
         Langfuse reads credentials from environment variables automatically:
         - LANGFUSE_PUBLIC_KEY
-        - LANGFUSE_SECRET_KEY
+        - LANGFUSE_SECRET_KEY
         - LANGFUSE_HOST (optional)
-
+
         Args:
             model_spec: Model specification for logging
-
+
         Raises:
             RuntimeError: If Langfuse keys are set but initialization fails
         """
         public_key = os.getenv("LANGFUSE_PUBLIC_KEY")
         secret_key = os.getenv("LANGFUSE_SECRET_KEY")
-
+
         if not (public_key and secret_key):
             logger.debug("🤖 Langfuse keys not found, tracing disabled")
             return
-
+
+        # Try to use preloaded handler first (avoids 200+ second delay on Cloud Run)
+        if _HAS_LANGFUSE_PRELOADER:
+            preloaded = get_preloaded_langfuse_handler()
+            if preloaded is not None:
+                logger.info(f"🤖 Using preloaded Langfuse handler for {model_spec}")
+                self._langfuse_handler = preloaded
+                return
+
+        # Fall back to creating new handler
+        logger.info(f"🤖 Initializing Langfuse handler (not preloaded) for {model_spec}...")
         try:
             from langfuse.langchain import CallbackHandler
 
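
backend/services/langfuse_preloader.py (+98 lines) is added by this release but its body is not part of this hunk. A minimal sketch of the get-or-None contract the factory code above relies on; everything here except the get_preloaded_langfuse_handler name and the CallbackHandler import is guessed:

    # Hypothetical sketch; the real module is backend/services/langfuse_preloader.py.
    import threading

    _handler = None
    _lock = threading.Lock()

    def preload_langfuse_handler() -> None:
        """Run once at process startup to pay the Langfuse init cost early."""
        global _handler
        with _lock:
            if _handler is None:
                from langfuse.langchain import CallbackHandler
                _handler = CallbackHandler()  # reads LANGFUSE_* env vars itself

    def get_preloaded_langfuse_handler():
        """Return the handler if preloading ran, else None (caller falls back)."""
        return _handler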
@@ -32,19 +32,24 @@ class AnchorSequenceFinder:
         progress_check_interval: int = 50,  # Check progress every N iterations
         logger: Optional[logging.Logger] = None,
     ):
+        init_start = time.time()
         self.min_sequence_length = min_sequence_length
         self.min_sources = min_sources
         self.timeout_seconds = timeout_seconds
         self.max_iterations_per_ngram = max_iterations_per_ngram
         self.progress_check_interval = progress_check_interval
         self.logger = logger or logging.getLogger(__name__)
+
+        self.logger.info("Initializing AnchorSequenceFinder...")
         self.phrase_analyzer = PhraseAnalyzer(logger=self.logger)
         self.used_positions = {}
 
         # Initialize cache directory
         self.cache_dir = Path(cache_dir)
         self.cache_dir.mkdir(parents=True, exist_ok=True)
-        self.logger.info(f"Initialized AnchorSequenceFinder with cache dir: {self.cache_dir}, timeout: {timeout_seconds}s")
+
+        init_elapsed = time.time() - init_start
+        self.logger.info(f"Initialized AnchorSequenceFinder in {init_elapsed:.2f}s (cache: {self.cache_dir}, timeout: {timeout_seconds}s)")
 
     def _check_timeout(self, start_time: float, operation_name: str = "operation"):
         """Check if timeout has occurred and raise exception if so."""
@@ -245,6 +250,65 @@ class AnchorSequenceFinder:
             self.logger.error(f"Unexpected error loading cache: {type(e).__name__}: {e}")
             return None
 
+    def _process_ngram_length_no_state(
+        self,
+        n: int,
+        trans_words: List[str],
+        all_words: List[Word],
+        ref_texts_clean: Dict[str, List[str]],
+        ref_words: Dict[str, List[Word]],
+        min_sources: int,
+    ) -> List[AnchorSequence]:
+        """Process a single n-gram length without modifying shared state (thread-safe).
+
+        This version doesn't track used positions - overlap filtering happens later
+        in _remove_overlapping_sequences. This allows parallel processing of different
+        n-gram lengths.
+        """
+        candidate_anchors = []
+
+        # Build hash-based index for O(1) lookups
+        ngram_index = self._build_ngram_index(ref_texts_clean, n)
+
+        # Generate n-grams from transcribed text
+        trans_ngrams = self._find_ngrams(trans_words, n)
+
+        # Single pass through all transcription n-grams
+        for ngram, trans_pos in trans_ngrams:
+            # Use indexed lookup (O(1) instead of O(n))
+            ngram_tuple = tuple(ngram)
+            if ngram_tuple not in ngram_index:
+                continue
+
+            # Find matches in all sources (no used_positions check - handled later)
+            matches = {}
+            source_positions = ngram_index[ngram_tuple]
+            for source, positions in source_positions.items():
+                if positions:
+                    matches[source] = positions[0]  # Take first position
+
+            if len(matches) >= min_sources:
+                # Get Word IDs for transcribed words
+                transcribed_word_ids = [w.id for w in all_words[trans_pos : trans_pos + n]]
+
+                # Get Word IDs for reference words
+                reference_word_ids = {
+                    source: [w.id for w in ref_words[source][pos : pos + n]]
+                    for source, pos in matches.items()
+                }
+
+                anchor = AnchorSequence(
+                    id=WordUtils.generate_id(),
+                    transcribed_word_ids=transcribed_word_ids,
+                    transcription_position=trans_pos,
+                    reference_positions=matches,
+                    reference_word_ids=reference_word_ids,
+                    confidence=len(matches) / len(ref_texts_clean),
+                )
+                candidate_anchors.append(anchor)
+
+        return candidate_anchors
+
     def _process_ngram_length(
         self,
         n: int,
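
_build_ngram_index and _find_ngrams are existing helpers whose bodies sit outside this hunk; functionally equivalent versions might look like the following (a sketch under that assumption, not the shipped code):

    from collections import defaultdict
    from typing import Dict, List, Tuple

    def build_ngram_index(
        ref_texts_clean: Dict[str, List[str]], n: int
    ) -> Dict[Tuple[str, ...], Dict[str, List[int]]]:
        """Map each n-gram tuple to {source: [positions]} for O(1) lookups."""
        index: Dict[Tuple[str, ...], Dict[str, List[int]]] = defaultdict(lambda: defaultdict(list))
        for source, words in ref_texts_clean.items():
            for pos in range(len(words) - n + 1):
                index[tuple(words[pos : pos + n])][source].append(pos)
        return index

    def find_ngrams(words: List[str], n: int) -> List[Tuple[List[str], int]]:
        """Return (ngram, start_position) pairs over the transcription."""
        return [(words[i : i + n], i) for i in range(len(words) - n + 1)]

Because each call builds its own index and appends only to a local list, the method touches no shared state, which is what makes it safe to run from several threads at once.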
@@ -408,45 +472,95 @@ class AnchorSequenceFinder:
             min_sources=self.min_sources,
         )
 
-        # Process n-gram lengths sequentially (single-threaded for cloud compatibility)
+        # Process n-gram lengths in parallel for better performance
+        # The overlap filtering at the end handles deduplication, so we don't
+        # need to track used_positions during processing
         candidate_anchors = []
-
+
         # Check timeout before processing
         self._check_timeout(start_time, "n-gram processing start")
-        self.logger.info(f"🔍 ANCHOR SEARCH: Starting sequential n-gram processing ({len(n_gram_lengths)} lengths)")
-
-        batch_size = 10
-        batch_results = []
-
-        for i, n in enumerate(n_gram_lengths):
-            try:
-                # Check timeout periodically
-                if self.timeout_seconds > 0:
-                    elapsed_time = time.time() - start_time
-                    if elapsed_time > self.timeout_seconds:
-                        self.logger.warning(f"🔍 ANCHOR SEARCH: ⏰ Timeout reached at n-gram {n}, stopping")
-                        break
-
-                anchors = self._process_ngram_length(
-                    n, trans_words, all_words, ref_texts_clean, ref_words, self.min_sources
-                )
-                candidate_anchors.extend(anchors)
-
-                # Batch logging
-                batch_results.append((n, len(anchors)))
-
-                # Log progress every batch_size results or on the last result
-                if (i + 1) % batch_size == 0 or (i + 1) == len(n_gram_lengths):
-                    total_anchors_in_batch = sum(anchor_count for _, anchor_count in batch_results)
-                    n_gram_ranges = [str(ng) for ng, _ in batch_results]
-                    range_str = f"{n_gram_ranges[0]}-{n_gram_ranges[-1]}" if len(n_gram_ranges) > 1 else n_gram_ranges[0]
-                    self.logger.debug(f"🔍 ANCHOR SEARCH: Completed n-gram lengths {range_str} - found {total_anchors_in_batch} anchors")
-                    batch_results = []
-
-            except Exception as e:
-                self.logger.warning(f"🔍 ANCHOR SEARCH: ⚠️ n-gram length {n} failed: {str(e)}")
-                batch_results.append((n, 0))
-                continue
+
+        # Determine parallelization strategy
+        import os
+        from concurrent.futures import ThreadPoolExecutor, as_completed
+
+        # Use parallel processing by default, can be disabled via env var
+        use_parallel = os.getenv("ANCHOR_SEARCH_SEQUENTIAL", "0").lower() not in {"1", "true", "yes"}
+        max_workers = int(os.getenv("ANCHOR_SEARCH_WORKERS", "4"))
+
+        if use_parallel and len(n_gram_lengths) > 1:
+            self.logger.info(f"🔍 ANCHOR SEARCH: Starting PARALLEL n-gram processing ({len(n_gram_lengths)} lengths, {max_workers} workers)")
+
+            # Process in parallel - each n-gram length is independent
+            # since we don't track used_positions during processing
+            with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                # Submit all tasks
+                future_to_n = {
+                    executor.submit(
+                        self._process_ngram_length_no_state,
+                        n, trans_words, all_words, ref_texts_clean, ref_words, self.min_sources
+                    ): n
+                    for n in n_gram_lengths
+                }
+
+                completed = 0
+                for future in as_completed(future_to_n):
+                    n = future_to_n[future]
+                    completed += 1
+
+                    # Check timeout periodically
+                    if self.timeout_seconds > 0:
+                        elapsed_time = time.time() - start_time
+                        if elapsed_time > self.timeout_seconds:
+                            self.logger.warning(f"🔍 ANCHOR SEARCH: ⏰ Timeout reached, stopping ({completed}/{len(n_gram_lengths)} completed)")
+                            # Cancel remaining futures
+                            for f in future_to_n.keys():
+                                f.cancel()
+                            break
+
+                    try:
+                        anchors = future.result()
+                        candidate_anchors.extend(anchors)
+                        if completed % 20 == 0:
+                            self.logger.debug(f"🔍 ANCHOR SEARCH: Progress {completed}/{len(n_gram_lengths)} lengths processed")
+                    except Exception as e:
+                        self.logger.warning(f"🔍 ANCHOR SEARCH: ⚠️ n-gram length {n} failed: {str(e)}")
+        else:
+            # Sequential fallback
+            self.logger.info(f"🔍 ANCHOR SEARCH: Starting sequential n-gram processing ({len(n_gram_lengths)} lengths)")
+
+            batch_size = 10
+            batch_results = []
+
+            for i, n in enumerate(n_gram_lengths):
+                try:
+                    # Check timeout periodically
+                    if self.timeout_seconds > 0:
+                        elapsed_time = time.time() - start_time
+                        if elapsed_time > self.timeout_seconds:
+                            self.logger.warning(f"🔍 ANCHOR SEARCH: ⏰ Timeout reached at n-gram {n}, stopping")
+                            break
+
+                    anchors = self._process_ngram_length(
+                        n, trans_words, all_words, ref_texts_clean, ref_words, self.min_sources
+                    )
+                    candidate_anchors.extend(anchors)
+
+                    # Batch logging
+                    batch_results.append((n, len(anchors)))
+
+                    # Log progress every batch_size results or on the last result
+                    if (i + 1) % batch_size == 0 or (i + 1) == len(n_gram_lengths):
+                        total_anchors_in_batch = sum(anchor_count for _, anchor_count in batch_results)
+                        n_gram_ranges = [str(ng) for ng, _ in batch_results]
+                        range_str = f"{n_gram_ranges[0]}-{n_gram_ranges[-1]}" if len(n_gram_ranges) > 1 else n_gram_ranges[0]
+                        self.logger.debug(f"🔍 ANCHOR SEARCH: Completed n-gram lengths {range_str} - found {total_anchors_in_batch} anchors")
+                        batch_results = []
+
+                except Exception as e:
+                    self.logger.warning(f"🔍 ANCHOR SEARCH: ⚠️ n-gram length {n} failed: {str(e)}")
+                    batch_results.append((n, 0))
+                    continue
 
         self.logger.info(f"🔍 ANCHOR SEARCH: ✅ Found {len(candidate_anchors)} candidate anchors in {time.time() - start_time:.1f}s")
 
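
The new parallel path is governed by two environment variables introduced in this hunk; for example, to restore the old sequential behaviour on a constrained container, or to widen the pool:

    import os

    os.environ["ANCHOR_SEARCH_SEQUENTIAL"] = "1"  # "1", "true" or "yes" disables parallelism
    os.environ["ANCHOR_SEARCH_WORKERS"] = "8"     # thread-pool size; default is 4

One caveat on the timeout handling: future.cancel() only prevents tasks that have not started yet, so n-gram lengths already running on a worker thread finish even after the loop breaks, and the executor shutdown can block briefly past the deadline.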
@@ -1,6 +1,7 @@
 from typing import List, Tuple, Dict, Any, Optional
 import spacy
 import logging
+import time
 import pyphen
 import nltk
 from nltk.corpus import cmudict
@@ -11,6 +12,15 @@ from lyrics_transcriber.types import GapSequence, WordCorrection
 from lyrics_transcriber.correction.handlers.base import GapCorrectionHandler
 from lyrics_transcriber.correction.handlers.word_operations import WordOperations
 
+# Try to import preloaders (may not exist in standalone library usage)
+try:
+    from backend.services.spacy_preloader import get_preloaded_model
+    from backend.services.nltk_preloader import get_preloaded_cmudict
+
+    _HAS_PRELOADER = True
+except ImportError:
+    _HAS_PRELOADER = False
+
 
 class SyllablesMatchHandler(GapCorrectionHandler):
     """Handles gaps where number of syllables in reference text matches number of syllables in transcription."""
@@ -18,11 +28,27 @@ class SyllablesMatchHandler(GapCorrectionHandler):
     def __init__(self, logger: Optional[logging.Logger] = None):
         super().__init__(logger)
         self.logger = logger or logging.getLogger(__name__)
+        init_start = time.time()
 
         # Marking SpacySyllables as used to prevent unused import warning
         _ = SpacySyllables
 
-        # Load spacy model with syllables pipeline
+        # Try to use preloaded model first (avoids 60+ second load on Cloud Run)
+        if _HAS_PRELOADER:
+            preloaded = get_preloaded_model("en_core_web_sm")
+            if preloaded is not None:
+                self.logger.info("Using preloaded SpaCy model for syllable analysis")
+                self.nlp = preloaded
+                # Add syllables component if not already present
+                if "syllables" not in self.nlp.pipe_names:
+                    self.nlp.add_pipe("syllables", after="tagger")
+                self._init_nltk_resources()
+                init_elapsed = time.time() - init_start
+                self.logger.info(f"Initialized SyllablesMatchHandler in {init_elapsed:.2f}s (preloaded)")
+                return
+
+        # Fall back to loading model directly
+        self.logger.info("Loading SpaCy model for syllable analysis (not preloaded)...")
         try:
             self.nlp = spacy.load("en_core_web_sm")
         except OSError:
@@ -43,10 +69,26 @@ class SyllablesMatchHandler(GapCorrectionHandler):
         if "syllables" not in self.nlp.pipe_names:
             self.nlp.add_pipe("syllables", after="tagger")
 
+        self._init_nltk_resources()
+        init_elapsed = time.time() - init_start
+        self.logger.info(f"Initialized SyllablesMatchHandler in {init_elapsed:.2f}s (lazy loaded)")
+
+    def _init_nltk_resources(self):
+        """Initialize NLTK resources (Pyphen and CMU dictionary)."""
+
         # Initialize Pyphen for English
         self.dic = pyphen.Pyphen(lang="en_US")
 
-        # Initialize NLTK's CMU dictionary
+        # Try to use preloaded cmudict first (avoids 50-100+ second download on Cloud Run)
+        if _HAS_PRELOADER:
+            preloaded_cmudict = get_preloaded_cmudict()
+            if preloaded_cmudict is not None:
+                self.logger.debug("Using preloaded NLTK cmudict")
+                self.cmudict = preloaded_cmudict
+                return
+
+        # Fall back to loading directly
+        self.logger.info("Loading NLTK cmudict (not preloaded)...")
         try:
             self.cmudict = cmudict.dict()
         except LookupError:
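
backend/services/spacy_preloader.py (+65 lines) and nltk_preloader.py (+122 lines) follow the same get-or-None contract as the Langfuse preloader sketched earlier, but keyed by resource name. A rough sketch of the spaCy side; only the get_preloaded_model name is confirmed by this diff, the rest is guessed:

    # Hypothetical sketch; the real module is backend/services/spacy_preloader.py.
    import threading
    from typing import Any, Dict, Optional

    _models: Dict[str, Any] = {}
    _lock = threading.Lock()

    def preload_model(language_code: str = "en_core_web_sm") -> None:
        """Load a spaCy model once at startup so request paths never pay the cost."""
        import spacy
        with _lock:
            if language_code not in _models:
                _models[language_code] = spacy.load(language_code)

    def get_preloaded_model(language_code: str) -> Optional[Any]:
        """Return the cached model, or None so callers fall back to spacy.load()."""
        return _models.get(language_code)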
@@ -5,6 +5,14 @@ import logging
 from lyrics_transcriber.correction.text_utils import clean_text
 from lyrics_transcriber.types import PhraseType, PhraseScore
 
+# Try to import preloader (may not exist in standalone library usage)
+try:
+    from backend.services.spacy_preloader import get_preloaded_model
+
+    _HAS_PRELOADER = True
+except ImportError:
+    _HAS_PRELOADER = False
+
 
 class PhraseAnalyzer:
     """Language-agnostic phrase analyzer using spaCy"""
@@ -17,6 +25,16 @@ class PhraseAnalyzer:
             language_code: spaCy language model to use
         """
         self.logger = logger
+
+        # Try to use preloaded model first (avoids 60+ second load on Cloud Run)
+        if _HAS_PRELOADER:
+            preloaded = get_preloaded_model(language_code)
+            if preloaded is not None:
+                self.logger.info(f"Using preloaded SpaCy model: {language_code}")
+                self.nlp = preloaded
+                return
+
+        # Fall back to loading model directly
         self.logger.info(f"Initializing PhraseAnalyzer with language model: {language_code}")
         try:
             self.nlp = spacy.load(language_code)