karaoke-gen 0.76.20__py3-none-any.whl → 0.82.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. karaoke_gen/instrumental_review/static/index.html +179 -16
  2. karaoke_gen/karaoke_gen.py +5 -4
  3. karaoke_gen/lyrics_processor.py +25 -6
  4. {karaoke_gen-0.76.20.dist-info → karaoke_gen-0.82.0.dist-info}/METADATA +79 -3
  5. {karaoke_gen-0.76.20.dist-info → karaoke_gen-0.82.0.dist-info}/RECORD +33 -31
  6. lyrics_transcriber/core/config.py +8 -0
  7. lyrics_transcriber/core/controller.py +43 -1
  8. lyrics_transcriber/correction/agentic/observability/langfuse_integration.py +178 -5
  9. lyrics_transcriber/correction/agentic/prompts/__init__.py +23 -0
  10. lyrics_transcriber/correction/agentic/prompts/classifier.py +66 -6
  11. lyrics_transcriber/correction/agentic/prompts/langfuse_prompts.py +298 -0
  12. lyrics_transcriber/correction/agentic/providers/config.py +7 -0
  13. lyrics_transcriber/correction/agentic/providers/constants.py +1 -1
  14. lyrics_transcriber/correction/agentic/providers/langchain_bridge.py +22 -7
  15. lyrics_transcriber/correction/agentic/providers/model_factory.py +28 -13
  16. lyrics_transcriber/correction/agentic/router.py +18 -13
  17. lyrics_transcriber/correction/corrector.py +1 -45
  18. lyrics_transcriber/frontend/.gitignore +1 -0
  19. lyrics_transcriber/frontend/e2e/agentic-corrections.spec.ts +207 -0
  20. lyrics_transcriber/frontend/e2e/fixtures/agentic-correction-data.json +226 -0
  21. lyrics_transcriber/frontend/package.json +4 -1
  22. lyrics_transcriber/frontend/playwright.config.ts +1 -1
  23. lyrics_transcriber/frontend/src/components/CorrectedWordWithActions.tsx +34 -30
  24. lyrics_transcriber/frontend/src/components/Header.tsx +141 -34
  25. lyrics_transcriber/frontend/src/components/LyricsAnalyzer.tsx +120 -3
  26. lyrics_transcriber/frontend/src/components/TranscriptionView.tsx +11 -1
  27. lyrics_transcriber/frontend/src/components/shared/components/HighlightedText.tsx +122 -35
  28. lyrics_transcriber/frontend/src/components/shared/types.ts +6 -0
  29. lyrics_transcriber/output/generator.py +50 -3
  30. lyrics_transcriber/transcribers/local_whisper.py +260 -0
  31. lyrics_transcriber/correction/handlers/llm.py +0 -293
  32. lyrics_transcriber/correction/handlers/llm_providers.py +0 -60
  33. {karaoke_gen-0.76.20.dist-info → karaoke_gen-0.82.0.dist-info}/WHEEL +0 -0
  34. {karaoke_gen-0.76.20.dist-info → karaoke_gen-0.82.0.dist-info}/entry_points.txt +0 -0
  35. {karaoke_gen-0.76.20.dist-info → karaoke_gen-0.82.0.dist-info}/licenses/LICENSE +0 -0
lyrics_transcriber/output/generator.py
@@ -52,7 +52,7 @@ class OutputGenerator:
 
         self.logger.info(f"Initializing OutputGenerator with config: {self.config}")
 
-        # Load output styles from JSON if provided
+        # Load output styles from JSON if provided, otherwise use defaults
         if self.config.output_styles_json and os.path.exists(self.config.output_styles_json):
            try:
                with open(self.config.output_styles_json, "r") as f:
@@ -67,9 +67,10 @@ class OutputGenerator:
                self.logger.warning(f"Failed to load output styles file: {str(e)}")
                self.config.styles = {}
        else:
-            # No styles file provided or doesn't exist
+            # No styles file provided or doesn't exist - use defaults
            if self.config.render_video or self.config.generate_cdg:
-                raise ValueError(f"Output styles file required for video/CDG generation but not found: {self.config.output_styles_json}")
+                self.logger.info("No output styles file provided, using default karaoke styles")
+                self.config.styles = self._get_default_styles()
            else:
                self.config.styles = {}
 
lyrics_transcriber/output/generator.py
@@ -242,6 +243,52 @@ class OutputGenerator:
 
         return resolution_dims, font_size, line_height
 
+    def _get_default_styles(self) -> dict:
+        """Get default styles for video/CDG generation when no styles file is provided."""
+        return {
+            "karaoke": {
+                # Video background
+                "background_color": "#000000",
+                "background_image": None,
+                # Font settings
+                "font": "Arial",
+                "font_path": "",  # Must be string, not None (for ASS generator)
+                "ass_name": "Default",
+                # Colors in "R, G, B, A" format (required by ASS)
+                "primary_color": "112, 112, 247, 255",
+                "secondary_color": "255, 255, 255, 255",
+                "outline_color": "26, 58, 235, 255",
+                "back_color": "0, 0, 0, 0",
+                # Boolean style options
+                "bold": False,
+                "italic": False,
+                "underline": False,
+                "strike_out": False,
+                # Numeric style options (all required for ASS)
+                "scale_x": 100,
+                "scale_y": 100,
+                "spacing": 0,
+                "angle": 0.0,
+                "border_style": 1,
+                "outline": 1,
+                "shadow": 0,
+                "margin_l": 0,
+                "margin_r": 0,
+                "margin_v": 0,
+                "encoding": 0,
+                # Layout settings
+                "max_line_length": 40,
+                "top_padding": 200,
+                "font_size": 100,
+            },
+            "cdg": {
+                "font_path": None,
+                "instrumental_background": None,
+                "title_screen_background": None,
+                "outro_background": None,
+            },
+        }
+
     def write_corrections_data(self, correction_result: CorrectionResult, output_prefix: str) -> str:
         """Write corrections data to JSON file."""
         self.logger.info("Writing corrections data JSON")
lyrics_transcriber/transcribers/local_whisper.py (new file)
@@ -0,0 +1,260 @@
+"""Local Whisper transcription service using whisper-timestamped for word-level timestamps."""
+
+from dataclasses import dataclass
+import os
+import logging
+from typing import Optional, Dict, Any, Union
+from pathlib import Path
+
+from lyrics_transcriber.types import TranscriptionData, LyricsSegment, Word
+from lyrics_transcriber.transcribers.base_transcriber import BaseTranscriber, TranscriptionError
+from lyrics_transcriber.utils.word_utils import WordUtils
+
+
+@dataclass
+class LocalWhisperConfig:
+    """Configuration for local Whisper transcription service."""
+
+    model_size: str = "medium"  # tiny, base, small, medium, large, large-v2, large-v3
+    device: Optional[str] = None  # None for auto-detect, or "cpu", "cuda", "mps"
+    cache_dir: Optional[str] = None  # Directory for model downloads (~/.cache/whisper by default)
+    language: Optional[str] = None  # Language code for transcription, None for auto-detect
+    compute_type: str = "auto"  # float16, float32, int8, auto
+
+
+class LocalWhisperTranscriber(BaseTranscriber):
+    """
+    Transcription service using local Whisper inference via whisper-timestamped.
+
+    This transcriber runs Whisper models locally on your machine, supporting
+    CPU, CUDA GPU, and Apple Silicon MPS acceleration. It uses the
+    whisper-timestamped library to get accurate word-level timestamps.
+
+    Requirements:
+        pip install karaoke-gen[local-whisper]
+
+    Configuration:
+        Set environment variables to customize behavior:
+        - WHISPER_MODEL_SIZE: Model size (tiny, base, small, medium, large)
+        - WHISPER_DEVICE: Device to use (cpu, cuda, mps, or auto)
+        - WHISPER_CACHE_DIR: Directory for model downloads
+        - WHISPER_LANGUAGE: Language code (en, es, fr, etc.) or auto-detect
+    """
+
+    def __init__(
+        self,
+        cache_dir: Union[str, Path],
+        config: Optional[LocalWhisperConfig] = None,
+        logger: Optional[logging.Logger] = None,
+    ):
+        """
+        Initialize local Whisper transcriber.
+
+        Args:
+            cache_dir: Directory for caching transcription results
+            config: Configuration options for the transcriber
+            logger: Logger instance to use
+        """
+        super().__init__(cache_dir=cache_dir, logger=logger)
+
+        # Initialize configuration from env vars or defaults
+        self.config = config or LocalWhisperConfig(
+            model_size=os.getenv("WHISPER_MODEL_SIZE", "medium"),
+            device=os.getenv("WHISPER_DEVICE"),  # None for auto-detect
+            cache_dir=os.getenv("WHISPER_CACHE_DIR"),
+            language=os.getenv("WHISPER_LANGUAGE"),  # None for auto-detect
+        )
+
+        # Lazy-loaded model instance (loaded on first use)
+        self._model = None
+        self._whisper_module = None
+
+        self.logger.debug(
+            f"LocalWhisperTranscriber initialized with model_size={self.config.model_size}, "
+            f"device={self.config.device or 'auto'}, language={self.config.language or 'auto-detect'}"
+        )
+
+    def get_name(self) -> str:
+        """Return the name of this transcription service."""
+        return "LocalWhisper"
+
+    def _check_dependencies(self) -> None:
+        """Check that required dependencies are installed."""
+        try:
+            import whisper_timestamped  # noqa: F401
+        except ImportError:
+            raise TranscriptionError(
+                "whisper-timestamped is not installed. "
+                "Install it with: pip install karaoke-gen[local-whisper] "
+                "or: pip install whisper-timestamped"
+            )
+
+    def _get_device(self) -> str:
+        """Determine the best device to use for inference."""
+        if self.config.device:
+            return self.config.device
+
+        # Auto-detect best available device
+        try:
+            import torch
+
+            if torch.cuda.is_available():
+                self.logger.info("Using CUDA GPU for Whisper inference")
+                return "cuda"
+            elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
+                self.logger.info("Using Apple Silicon MPS for Whisper inference")
+                return "cpu"  # whisper-timestamped works better with CPU on MPS
+            else:
+                self.logger.info("Using CPU for Whisper inference (no GPU detected)")
+                return "cpu"
+        except ImportError:
+            self.logger.warning("PyTorch not available, defaulting to CPU")
+            return "cpu"
+
+    def _load_model(self):
+        """Load the Whisper model (lazy loading on first use)."""
+        if self._model is not None:
+            return self._model
+
+        self._check_dependencies()
+        import whisper_timestamped as whisper
+
+        self._whisper_module = whisper
+
+        device = self._get_device()
+        self.logger.info(f"Loading Whisper model '{self.config.model_size}' on device '{device}'...")
+
+        try:
+            # Load model with optional custom cache directory
+            download_root = self.config.cache_dir
+            self._model = whisper.load_model(
+                self.config.model_size,
+                device=device,
+                download_root=download_root,
+            )
+            self.logger.info(f"Whisper model '{self.config.model_size}' loaded successfully")
+            return self._model
+        except RuntimeError as e:
+            if "out of memory" in str(e).lower() or "CUDA" in str(e):
+                raise TranscriptionError(
+                    f"GPU out of memory loading model '{self.config.model_size}'. "
+                    "Try using a smaller model (set WHISPER_MODEL_SIZE=small or tiny) "
+                    "or force CPU mode (set WHISPER_DEVICE=cpu)"
+                ) from e
+            raise TranscriptionError(f"Failed to load Whisper model: {e}") from e
+        except Exception as e:
+            raise TranscriptionError(f"Failed to load Whisper model: {e}") from e
+
+    def _perform_transcription(self, audio_filepath: str) -> Dict[str, Any]:
+        """
+        Perform local Whisper transcription with word-level timestamps.
+
+        Args:
+            audio_filepath: Path to the audio file to transcribe
+
+        Returns:
+            Raw transcription result dictionary
+        """
+        self.logger.info(f"Starting local Whisper transcription for {audio_filepath}")
+
+        # Load model if not already loaded
+        model = self._load_model()
+
+        try:
+            # Perform transcription with word-level timestamps
+            transcribe_kwargs = {
+                "verbose": False,
+            }
+
+            # Add language if specified
+            if self.config.language:
+                transcribe_kwargs["language"] = self.config.language
+
+            self.logger.debug(f"Transcribing with options: {transcribe_kwargs}")
+            result = self._whisper_module.transcribe_timestamped(
+                model,
+                audio_filepath,
+                **transcribe_kwargs,
+            )
+
+            self.logger.info("Local Whisper transcription completed successfully")
+            return result
+
+        except RuntimeError as e:
+            if "out of memory" in str(e).lower():
+                raise TranscriptionError(
+                    f"GPU out of memory during transcription. "
+                    "Try using a smaller model (WHISPER_MODEL_SIZE=small) "
+                    "or force CPU mode (WHISPER_DEVICE=cpu)"
+                ) from e
+            raise TranscriptionError(f"Transcription failed: {e}") from e
+        except Exception as e:
+            raise TranscriptionError(f"Transcription failed: {e}") from e
+
+    def _convert_result_format(self, raw_data: Dict[str, Any]) -> TranscriptionData:
+        """
+        Convert whisper-timestamped output to standard TranscriptionData format.
+
+        The whisper-timestamped library returns results in this format:
+        {
+            "text": "Full transcription text",
+            "segments": [
+                {
+                    "id": 0,
+                    "text": "Segment text",
+                    "start": 0.0,
+                    "end": 2.5,
+                    "words": [
+                        {"text": "word", "start": 0.0, "end": 0.5, "confidence": 0.95},
+                        ...
+                    ]
+                },
+                ...
+            ],
+            "language": "en"
+        }
+
+        Args:
+            raw_data: Raw output from whisper_timestamped.transcribe_timestamped()
+
+        Returns:
+            TranscriptionData with segments, words, and metadata
+        """
+        segments = []
+        all_words = []
+
+        for seg in raw_data.get("segments", []):
+            segment_words = []
+
+            for word_data in seg.get("words", []):
+                word = Word(
+                    id=WordUtils.generate_id(),
+                    text=word_data.get("text", "").strip(),
+                    start_time=word_data.get("start", 0.0),
+                    end_time=word_data.get("end", 0.0),
+                    confidence=word_data.get("confidence"),
+                )
+                segment_words.append(word)
+                all_words.append(word)
+
+            # Create segment with its words
+            segment = LyricsSegment(
+                id=WordUtils.generate_id(),
+                text=seg.get("text", "").strip(),
+                words=segment_words,
+                start_time=seg.get("start", 0.0),
+                end_time=seg.get("end", 0.0),
+            )
+            segments.append(segment)
+
+        return TranscriptionData(
+            segments=segments,
+            words=all_words,
+            text=raw_data.get("text", "").strip(),
+            source=self.get_name(),
+            metadata={
+                "model_size": self.config.model_size,
+                "detected_language": raw_data.get("language", "unknown"),
+                "device": self._get_device(),
+            },
+        )
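
A hedged usage sketch for the new transcriber. The constructor, config fields, and environment variables are taken from the hunk above; the public `transcribe()` entry point is assumed to be inherited from `BaseTranscriber`, which is not part of this diff:

```python
import os

from lyrics_transcriber.transcribers.local_whisper import (
    LocalWhisperConfig,
    LocalWhisperTranscriber,
)

# Env-var configuration is honored only when no explicit config is passed.
os.environ["WHISPER_MODEL_SIZE"] = "small"  # smaller model for limited VRAM
os.environ["WHISPER_DEVICE"] = "cpu"        # force CPU, skipping auto-detect

# An explicit config takes precedence over the env vars above.
config = LocalWhisperConfig(model_size="base", language="en")

transcriber = LocalWhisperTranscriber(cache_dir="/tmp/transcription-cache", config=config)

# Assumption: BaseTranscriber exposes a transcribe(audio_filepath) method that
# wraps _perform_transcription() and _convert_result_format(); that base class
# is not shown in this diff.
data = transcriber.transcribe("song.flac")
for segment in data.segments:
    print(f"[{segment.start_time:.2f}-{segment.end_time:.2f}] {segment.text}")
```

The model itself is lazy-loaded on the first call, so constructing the transcriber is cheap; the first transcription pays the model download and load cost.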
lyrics_transcriber/correction/handlers/llm.py (deleted)
@@ -1,293 +0,0 @@
-from typing import List, Optional, Tuple, Dict, Any, Union
-import logging
-import json
-from datetime import datetime
-from pathlib import Path
-
-from lyrics_transcriber.types import GapSequence, WordCorrection
-from lyrics_transcriber.correction.handlers.base import GapCorrectionHandler
-from lyrics_transcriber.correction.handlers.word_operations import WordOperations
-from lyrics_transcriber.correction.handlers.llm_providers import LLMProvider
-
-
-class LLMHandler(GapCorrectionHandler):
-    """Uses an LLM to analyze and correct gaps by comparing with reference lyrics."""
-
-    def __init__(
-        self, provider: LLMProvider, name: str, logger: Optional[logging.Logger] = None, cache_dir: Optional[Union[str, Path]] = None
-    ):
-        super().__init__(logger)
-        self.logger = logger or logging.getLogger(__name__)
-        self.provider = provider
-        self.name = name
-        self.cache_dir = Path(cache_dir) if cache_dir else None
-
-    def _format_prompt(self, gap: GapSequence, data: Optional[Dict[str, Any]] = None) -> str:
-        """Format the prompt for the LLM with context about the gap and reference lyrics."""
-        word_map = data.get("word_map", {})
-        metadata = data.get("metadata", {}) if data else {}
-
-        if not word_map:
-            self.logger.error("No word_map provided in data")
-            return ""
-
-        # Format transcribed words with their IDs
-        transcribed_words = [{"id": word_id, "text": word_map[word_id].text} for word_id in gap.transcribed_word_ids if word_id in word_map]
-
-        prompt = (
-            "You are a lyrics correction expert. You will be given transcribed lyrics that may contain errors "
-            "and reference lyrics from multiple sources. Your task is to analyze each word in the transcribed text "
-            "and suggest specific corrections based on the reference lyrics.\n\n"
-            "Each word has a unique ID. When suggesting corrections, you must specify the ID of the word being corrected. "
-            "This ensures accuracy in applying your corrections.\n\n"
-            "For each correction, specify:\n"
-            "1. The word ID being corrected\n"
-            "2. The correction type ('replace', 'split', 'combine', or 'delete')\n"
-            "3. The corrected text\n"
-            "4. Your confidence level\n"
-            "5. The reason for the correction\n\n"
-        )
-
-        # Add song context if available
-        if metadata and metadata.get("artist") and metadata.get("title"):
-            prompt += f"Song: {metadata['title']}\nArtist: {metadata['artist']}\n\n"
-
-        # Format transcribed words with IDs
-        prompt += "Transcribed words:\n"
-        for word in transcribed_words:
-            prompt += f"- ID: {word['id']}, Text: '{word['text']}'\n"
-
-        prompt += "\nReference lyrics from different sources:\n"
-
-        # Add each reference source with words and their IDs
-        for source, word_ids in gap.reference_word_ids.items():
-            reference_words = [{"id": word_id, "text": word_map[word_id].text} for word_id in word_ids if word_id in word_map]
-            prompt += f"\n{source} immediate context:\n"
-            for word in reference_words:
-                prompt += f"- ID: {word['id']}, Text: '{word['text']}'\n"
-
-            # Add full lyrics if available
-            if metadata and metadata.get("full_reference_texts", {}).get(source):
-                prompt += f"\nFull {source} lyrics:\n{metadata['full_reference_texts'][source]}\n"
-
-        # Add context about surrounding anchors if available
-        if gap.preceding_anchor_id:
-            preceding_anchor = next((a.anchor for a in data.get("anchor_sequences", []) if a.anchor.id == gap.preceding_anchor_id), None)
-            if preceding_anchor:
-                anchor_words = [
-                    {"id": word_id, "text": word_map[word_id].text}
-                    for word_id in preceding_anchor.transcribed_word_ids
-                    if word_id in word_map
-                ]
-                prompt += "\nPreceding correct words:\n"
-                for word in anchor_words:
-                    prompt += f"- ID: {word['id']}, Text: '{word['text']}'\n"
-
-        prompt += (
-            "\nProvide corrections in the following JSON format:\n"
-            "{\n"
-            '  "corrections": [\n'
-            "    {\n"
-            '      "word_id": "id_of_word_to_correct",\n'
-            '      "type": "replace|split|combine|delete",\n'
-            '      "corrected_text": "new text",\n'
-            '      "reference_word_id": "id_from_reference_lyrics", // Optional, use when matching a specific reference word\n'
-            '      "confidence": 0.9,\n'
-            '      "reason": "explanation of correction"\n'
-            "    }\n"
-            "  ]\n"
-            "}\n\n"
-            "Important rules:\n"
-            "1. Always include the word_id for each correction\n"
-            "2. For 'split' type, corrected_text should contain the space-separated words\n"
-            "3. For 'combine' type, word_id should be the first word to combine\n"
-            "4. Include reference_word_id when the correction matches a specific reference word\n"
-            "5. Only suggest corrections when you're confident they improve the lyrics\n"
-            "6. Preserve any existing words that match the reference lyrics\n"
-            "7. Respond ONLY with the JSON object, no other text"
-        )
-
-        return prompt
-
-    def can_handle(self, gap: GapSequence, data: Optional[Dict[str, Any]] = None) -> Tuple[bool, Dict[str, Any]]:
-        """LLM handler can attempt to handle any gap with reference words."""
-        if not gap.reference_word_ids:
-            self.logger.debug("No reference words available")
-            return False, {}
-
-        return True, {}
-
-    def _write_debug_info(self, prompt: str, response: str, gap_index: int, audio_file_hash: Optional[str] = None) -> None:
-        """Write prompt and response to debug files."""
-        if not self.cache_dir:
-            self.logger.warning("No cache directory provided, skipping LLM debug output")
-            return
-
-        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-        debug_dir = self.cache_dir / "llm_debug"
-        debug_dir.mkdir(exist_ok=True, parents=True)
-
-        hash_prefix = f"{audio_file_hash}_" if audio_file_hash else ""
-        filename = debug_dir / f"llm_debug_{hash_prefix}{gap_index}_{timestamp}.txt"
-
-        debug_content = "=== LLM PROMPT ===\n" f"{prompt}\n\n" "=== LLM RESPONSE ===\n" f"{response}\n"
-
-        try:
-            with open(filename, "w", encoding="utf-8") as f:
-                f.write(debug_content)
-        except IOError as e:
-            self.logger.error(f"Failed to write LLM debug file: {e}")
-
-    def handle(self, gap: GapSequence, data: Optional[Dict[str, Any]] = None) -> List[WordCorrection]:
-        """Process the gap using the LLM and create corrections based on its response."""
-        if not data or "word_map" not in data:
-            self.logger.error("No word_map provided in data")
-            return []
-
-        word_map = data["word_map"]
-        transcribed_words = [word_map[word_id].text for word_id in gap.transcribed_word_ids if word_id in word_map]
-
-        # Calculate reference positions using the centralized method
-        reference_positions = (
-            WordOperations.calculate_reference_positions(gap, anchor_sequences=data.get("anchor_sequences", [])) or {}
-        )  # Ensure empty dict if None
-
-        prompt = self._format_prompt(gap, data)
-        if not prompt:
-            return []
-
-        # Get a unique index for this gap based on its position
-        gap_index = gap.transcription_position
-
-        try:
-            self.logger.debug(f"Processing gap words: {transcribed_words}")
-            self.logger.debug(f"Reference word IDs: {gap.reference_word_ids}")
-
-            response = self.provider.generate_response(prompt)
-
-            # Write debug info to files
-            self._write_debug_info(prompt, response, gap_index, audio_file_hash=data.get("audio_file_hash"))
-
-            try:
-                corrections_data = json.loads(response)
-            except json.JSONDecodeError as e:
-                self.logger.error(f"Failed to parse LLM response as JSON: {e}")
-                self.logger.error(f"Raw response content: {response}")
-                return []
-
-            # Check if corrections exist and are non-empty
-            if not corrections_data.get("corrections"):
-                self.logger.debug("No corrections suggested by LLM")
-                return []
-
-            corrections = []
-            for correction in corrections_data["corrections"]:
-                # Validate word_id exists in gap
-                if correction["word_id"] not in gap.transcribed_word_ids:
-                    self.logger.error(f"LLM suggested correction for word_id {correction['word_id']} which is not in the gap")
-                    continue
-
-                # Get original word from word map
-                original_word = word_map[correction["word_id"]]
-                position = gap.transcription_position + gap.transcribed_word_ids.index(correction["word_id"])
-
-                self.logger.debug(f"Processing correction: {correction}")
-
-                if correction["type"] == "replace":
-                    self.logger.debug(
-                        f"Creating replacement: '{original_word.text}' -> '{correction['corrected_text']}' " f"at position {position}"
-                    )
-                    corrections.append(
-                        WordOperations.create_word_replacement_correction(
-                            original_word=original_word.text,
-                            corrected_word=correction["corrected_text"],
-                            original_position=position,
-                            source="LLM",
-                            confidence=correction["confidence"],
-                            reason=correction["reason"],
-                            handler=self.name,
-                            reference_positions=reference_positions,
-                            original_word_id=correction["word_id"],
-                            corrected_word_id=correction.get("reference_word_id"),
-                        )
-                    )
-                elif correction["type"] == "split":
-                    split_words = correction["corrected_text"].split()
-                    self.logger.debug(f"Creating split: '{original_word.text}' -> {split_words} " f"at position {position}")
-
-                    # Get reference word IDs if provided
-                    reference_word_ids = correction.get("reference_word_ids", [None] * len(split_words))
-
-                    corrections.extend(
-                        WordOperations.create_word_split_corrections(
-                            original_word=original_word.text,
-                            reference_words=split_words,
-                            original_position=position,
-                            source="LLM",
-                            confidence=correction["confidence"],
-                            reason=correction["reason"],
-                            handler=self.name,
-                            reference_positions=reference_positions,
-                            original_word_id=correction["word_id"],
-                            corrected_word_ids=reference_word_ids,
-                        )
-                    )
-                elif correction["type"] == "combine":
-                    # Get all word IDs to combine
-                    word_ids_to_combine = []
-                    current_idx = gap.transcribed_word_ids.index(correction["word_id"])
-                    words_needed = len(correction["corrected_text"].split())
-
-                    if current_idx + words_needed <= len(gap.transcribed_word_ids):
-                        word_ids_to_combine = gap.transcribed_word_ids[current_idx : current_idx + words_needed]
-                    else:
-                        self.logger.error(f"Not enough words available to combine at position {position}")
-                        continue
-
-                    words_to_combine = [word_map[word_id].text for word_id in word_ids_to_combine]
-
-                    self.logger.debug(
-                        f"Creating combine: {words_to_combine} -> '{correction['corrected_text']}' " f"at position {position}"
-                    )
-
-                    corrections.extend(
-                        WordOperations.create_word_combine_corrections(
-                            original_words=words_to_combine,
-                            reference_word=correction["corrected_text"],
-                            original_position=position,
-                            source="LLM",
-                            confidence=correction["confidence"],
-                            combine_reason=correction["reason"],
-                            delete_reason=f"Part of combining words: {correction['reason']}",
-                            handler=self.name,
-                            reference_positions=reference_positions,
-                            original_word_ids=word_ids_to_combine,
-                            corrected_word_id=correction.get("reference_word_id"),
-                        )
-                    )
-                elif correction["type"] == "delete":
-                    self.logger.debug(f"Creating deletion: '{original_word.text}' at position {position}")
-                    corrections.append(
-                        WordCorrection(
-                            original_word=original_word.text,
-                            corrected_word="",
-                            segment_index=0,
-                            original_position=position,
-                            confidence=correction["confidence"],
-                            source="LLM",
-                            reason=correction["reason"],
-                            alternatives={},
-                            is_deletion=True,
-                            handler=self.name,
-                            reference_positions=reference_positions,
-                            word_id=correction["word_id"],
-                            corrected_word_id=None,
-                        )
-                    )
-
-            self.logger.debug(f"Created {len(corrections)} corrections: {[f'{c.original_word}->{c.corrected_word}' for c in corrections]}")
-            return corrections
-
-        except Exception as e:
-            self.logger.error(f"Unexpected error in LLM handler: {e}")
-            return []
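
For anyone migrating off this handler to the new agentic correction path, the JSON contract its prompt enforced is worth keeping on record. A hedged sketch of validating one such response; the field names and placeholder values come straight from the prompt above, while the validation logic itself is illustrative:

```python
import json

# Example response shaped like the schema the removed prompt requested.
raw_response = """
{
  "corrections": [
    {
      "word_id": "id_of_word_to_correct",
      "type": "replace",
      "corrected_text": "new text",
      "reference_word_id": "id_from_reference_lyrics",
      "confidence": 0.9,
      "reason": "explanation of correction"
    }
  ]
}
"""

VALID_TYPES = {"replace", "split", "combine", "delete"}

data = json.loads(raw_response)
for correction in data.get("corrections", []):
    # Mirrors the checks handle() ran before applying a correction.
    if correction["type"] not in VALID_TYPES:
        raise ValueError(f"Unknown correction type: {correction['type']}")
    print(correction["word_id"], correction["type"], correction["corrected_text"])
```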