supervertaler-1.9.153-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of supervertaler might be problematic.

Files changed (85)
  1. Supervertaler.py +47886 -0
  2. modules/__init__.py +10 -0
  3. modules/ai_actions.py +964 -0
  4. modules/ai_attachment_manager.py +343 -0
  5. modules/ai_file_viewer_dialog.py +210 -0
  6. modules/autofingers_engine.py +466 -0
  7. modules/cafetran_docx_handler.py +379 -0
  8. modules/config_manager.py +469 -0
  9. modules/database_manager.py +1878 -0
  10. modules/database_migrations.py +417 -0
  11. modules/dejavurtf_handler.py +779 -0
  12. modules/document_analyzer.py +427 -0
  13. modules/docx_handler.py +689 -0
  14. modules/encoding_repair.py +319 -0
  15. modules/encoding_repair_Qt.py +393 -0
  16. modules/encoding_repair_ui.py +481 -0
  17. modules/feature_manager.py +350 -0
  18. modules/figure_context_manager.py +340 -0
  19. modules/file_dialog_helper.py +148 -0
  20. modules/find_replace.py +164 -0
  21. modules/find_replace_qt.py +457 -0
  22. modules/glossary_manager.py +433 -0
  23. modules/image_extractor.py +188 -0
  24. modules/keyboard_shortcuts_widget.py +571 -0
  25. modules/llm_clients.py +1211 -0
  26. modules/llm_leaderboard.py +737 -0
  27. modules/llm_superbench_ui.py +1401 -0
  28. modules/local_llm_setup.py +1104 -0
  29. modules/model_update_dialog.py +381 -0
  30. modules/model_version_checker.py +373 -0
  31. modules/mqxliff_handler.py +638 -0
  32. modules/non_translatables_manager.py +743 -0
  33. modules/pdf_rescue_Qt.py +1822 -0
  34. modules/pdf_rescue_tkinter.py +909 -0
  35. modules/phrase_docx_handler.py +516 -0
  36. modules/project_home_panel.py +209 -0
  37. modules/prompt_assistant.py +357 -0
  38. modules/prompt_library.py +689 -0
  39. modules/prompt_library_migration.py +447 -0
  40. modules/quick_access_sidebar.py +282 -0
  41. modules/ribbon_widget.py +597 -0
  42. modules/sdlppx_handler.py +874 -0
  43. modules/setup_wizard.py +353 -0
  44. modules/shortcut_manager.py +932 -0
  45. modules/simple_segmenter.py +128 -0
  46. modules/spellcheck_manager.py +727 -0
  47. modules/statuses.py +207 -0
  48. modules/style_guide_manager.py +315 -0
  49. modules/superbench_ui.py +1319 -0
  50. modules/superbrowser.py +329 -0
  51. modules/supercleaner.py +600 -0
  52. modules/supercleaner_ui.py +444 -0
  53. modules/superdocs.py +19 -0
  54. modules/superdocs_viewer_qt.py +382 -0
  55. modules/superlookup.py +252 -0
  56. modules/tag_cleaner.py +260 -0
  57. modules/tag_manager.py +333 -0
  58. modules/term_extractor.py +270 -0
  59. modules/termbase_entry_editor.py +842 -0
  60. modules/termbase_import_export.py +488 -0
  61. modules/termbase_manager.py +1060 -0
  62. modules/termview_widget.py +1172 -0
  63. modules/theme_manager.py +499 -0
  64. modules/tm_editor_dialog.py +99 -0
  65. modules/tm_manager_qt.py +1280 -0
  66. modules/tm_metadata_manager.py +545 -0
  67. modules/tmx_editor.py +1461 -0
  68. modules/tmx_editor_qt.py +2784 -0
  69. modules/tmx_generator.py +284 -0
  70. modules/tracked_changes.py +900 -0
  71. modules/trados_docx_handler.py +430 -0
  72. modules/translation_memory.py +715 -0
  73. modules/translation_results_panel.py +2134 -0
  74. modules/translation_services.py +282 -0
  75. modules/unified_prompt_library.py +659 -0
  76. modules/unified_prompt_manager_qt.py +3951 -0
  77. modules/voice_commands.py +920 -0
  78. modules/voice_dictation.py +477 -0
  79. modules/voice_dictation_lite.py +249 -0
  80. supervertaler-1.9.153.dist-info/METADATA +896 -0
  81. supervertaler-1.9.153.dist-info/RECORD +85 -0
  82. supervertaler-1.9.153.dist-info/WHEEL +5 -0
  83. supervertaler-1.9.153.dist-info/entry_points.txt +2 -0
  84. supervertaler-1.9.153.dist-info/licenses/LICENSE +21 -0
  85. supervertaler-1.9.153.dist-info/top_level.txt +2 -0
modules/termbase_import_export.py
@@ -0,0 +1,488 @@
+ """
+ Termbase Import/Export Module
+
+ Handles importing and exporting termbases in TSV (Tab-Separated Values) format.
+ TSV is simple, universal, and works well with Excel, Google Sheets, and text editors.
+
+ Format:
+ - First row: header with column names
+ - Tab-delimited fields
+ - UTF-8 encoding with BOM for Excel compatibility
+ - Multi-line content wrapped in quotes
+ - Boolean values: TRUE/FALSE or 1/0
+
+ Standard columns:
+ - Source Term (required)
+ - Target Term (required)
+ - Priority (optional, 1-99, default: 50)
+ - Domain (optional)
+ - Notes (optional, can be multi-line)
+ - Project (optional)
+ - Client (optional)
+ - Forbidden (optional, TRUE/FALSE)
+ """
+
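# Illustrative sketch (not part of the package): a minimal file in the TSV
# dialect the docstring above describes. The pipe-delimited synonyms and the
# [!...] forbidden-synonym marker follow the import/export code further down;
# the terms and the file name are made up.
sample = (
    "Source Term\tTarget Term\tPriority\tDomain\tNotes\tForbidden\n"
    "hond|[!mormel]\tdog|hound\t50\tGeneral\tFirst pipe item is the main term\tFALSE\n"
)
with open("sample_termbase.tsv", "w", encoding="utf-8-sig", newline="") as f:
    f.write(sample)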
+ import csv
+ import os
+ from typing import Dict, List, Optional, Tuple
+ from dataclasses import dataclass
+
+
+ @dataclass
+ class ImportResult:
+     """Result of a termbase import operation"""
+     success: bool
+     imported_count: int
+     skipped_count: int
+     error_count: int
+     errors: List[Tuple[int, str]]  # (line_number, error_message)
+     message: str
+
+
+ class TermbaseImporter:
+     """Import termbases from TSV files"""
+
+     # Standard column headers (case-insensitive matching)
+     STANDARD_HEADERS = {
+         'source': ['source term', 'source', 'src', 'term (source)', 'source language'],
+         'target': ['target term', 'target', 'tgt', 'term (target)', 'target language'],
+         'priority': ['priority', 'prio', 'rank'],
+         'domain': ['domain', 'subject', 'field', 'category'],
+         'notes': ['notes', 'note', 'definition', 'comment', 'comments', 'description'],
+         'project': ['project', 'proj'],
+         'client': ['client', 'customer'],
+         'forbidden': ['forbidden', 'do not use', 'prohibited', 'banned'],
+         'term_uuid': ['term uuid', 'uuid', 'term id', 'id', 'term_uuid', 'termid']
+     }
+
+     # Common language names that can be used as column headers
+     LANGUAGE_NAMES = [
+         'dutch', 'english', 'german', 'french', 'spanish', 'italian', 'portuguese',
+         'russian', 'chinese', 'japanese', 'korean', 'arabic', 'hebrew', 'turkish',
+         'polish', 'czech', 'slovak', 'hungarian', 'romanian', 'bulgarian', 'greek',
+         'swedish', 'norwegian', 'danish', 'finnish', 'estonian', 'latvian', 'lithuanian',
+         'ukrainian', 'croatian', 'serbian', 'slovenian', 'bosnian', 'macedonian',
+         'catalan', 'basque', 'galician', 'welsh', 'irish', 'scottish',
+         'indonesian', 'malay', 'thai', 'vietnamese', 'hindi', 'bengali', 'tamil',
+         'afrikaans', 'swahili', 'persian', 'farsi', 'urdu', 'punjabi',
+         'nederlands', 'deutsch', 'français', 'español', 'italiano', 'português',
+         'русский', '中文', '日本語', '한국어', 'العربية', 'עברית', 'türkçe'
+     ]
+
+     def __init__(self, db_manager, termbase_manager):
+         """
+         Initialize importer
+
+         Args:
+             db_manager: DatabaseManager instance
+             termbase_manager: TermbaseManager instance
+         """
+         self.db_manager = db_manager
+         self.termbase_manager = termbase_manager
+
+     def import_tsv(self, filepath: str, termbase_id: int,
+                    skip_duplicates: bool = True,
+                    update_duplicates: bool = False,
+                    progress_callback=None) -> ImportResult:
+         """
+         Import terms from TSV file
+
+         Args:
+             filepath: Path to TSV file
+             termbase_id: Target termbase ID
+             skip_duplicates: Skip terms that already exist (based on source term)
+             update_duplicates: Update existing terms instead of skipping
+             progress_callback: Optional callback(current, total, message) for progress updates
+
+         Returns:
+             ImportResult with statistics and errors
+         """
+         errors = []
+         imported_count = 0
+         skipped_count = 0
+         error_count = 0
+
+         def report_progress(current, total, message):
+             """Report progress if callback is provided"""
+             if progress_callback:
+                 progress_callback(current, total, message)
+
+         try:
+             # Count total lines first for progress reporting
+             total_lines = 0
+             with open(filepath, 'r', encoding='utf-8-sig', newline='') as f:
+                 total_lines = sum(1 for _ in f) - 1  # Subtract header row
+
+             report_progress(0, total_lines, f"Starting import of {total_lines} entries...")
+
+             # Read file with UTF-8 encoding (handle BOM if present)
+             with open(filepath, 'r', encoding='utf-8-sig', newline='') as f:
+                 # Use csv.DictReader with tab delimiter
+                 reader = csv.DictReader(f, delimiter='\t')
+
+                 # Get column mapping
+                 if not reader.fieldnames:
+                     return ImportResult(
+                         success=False,
+                         imported_count=0,
+                         skipped_count=0,
+                         error_count=1,
+                         errors=[(0, "File is empty or has no header row")],
+                         message="Import failed: No header row found"
+                     )
+
+                 column_map = self._map_columns(reader.fieldnames)
+
+                 report_progress(0, total_lines, f"Found columns: {', '.join(column_map.keys())}")
+
+                 if not column_map.get('source') or not column_map.get('target'):
+                     return ImportResult(
+                         success=False,
+                         imported_count=0,
+                         skipped_count=0,
+                         error_count=1,
+                         errors=[(0, f"Could not find required columns. Headers: {reader.fieldnames}")],
+                         message="Import failed: Missing required columns (Source Term and Target Term)"
+                     )
+
+                 # Get existing terms for duplicate detection
+                 existing_terms_by_source = {}
+                 existing_terms_by_uuid = {}
+                 if skip_duplicates or update_duplicates:
+                     report_progress(0, total_lines, "Loading existing terms for duplicate detection...")
+                     terms = self.termbase_manager.get_terms(termbase_id)
+                     existing_terms_by_source = {term['source_term'].lower(): term for term in terms}
+                     existing_terms_by_uuid = {term.get('term_uuid'): term for term in terms if term.get('term_uuid')}
+                     report_progress(0, total_lines, f"Found {len(existing_terms_by_source)} existing terms")
+
+                 # Process each row
+                 for line_num, row in enumerate(reader, start=2):  # Start at 2 (line 1 is header)
+                     current_row = line_num - 1  # Adjust for progress (1-indexed from data rows)
+                     try:
+                         # Extract data using column mapping
+                         source_field = self._get_field(row, column_map.get('source', ''))
+                         target_field = self._get_field(row, column_map.get('target', ''))
+                         term_uuid = self._get_field(row, column_map.get('term_uuid', ''))
+
+                         # Validate required fields
+                         if not source_field or not target_field:
+                             errors.append((line_num, "Missing source or target term"))
+                             error_count += 1
+                             report_progress(current_row, total_lines, f"❌ Line {line_num}: Missing source or target")
+                             continue
+
+                         # Parse source: first item = main term, rest = synonyms
+                         source_parts = [s.strip() for s in source_field.split('|') if s.strip()]
+                         source_term = source_parts[0] if source_parts else ''
+                         source_synonym_parts = source_parts[1:] if len(source_parts) > 1 else []
+
+                         # Parse target: first item = main term, rest = synonyms
+                         target_parts = [s.strip() for s in target_field.split('|') if s.strip()]
+                         target_term = target_parts[0] if target_parts else ''
+                         target_synonym_parts = target_parts[1:] if len(target_parts) > 1 else []
+
+                         # Check for duplicates - UUID takes priority over source term matching
+                         existing_term = None
+
+                         if term_uuid and term_uuid in existing_terms_by_uuid:
+                             # UUID match - this is definitely the same term
+                             existing_term = existing_terms_by_uuid[term_uuid]
+                         elif source_term.lower() in existing_terms_by_source:
+                             # Source term match (no UUID or UUID doesn't match)
+                             existing_term = existing_terms_by_source[source_term.lower()]
+
+                         if existing_term:
+                             if update_duplicates:
+                                 # Update existing term
+                                 self._update_term_from_row(existing_term['id'], row, column_map)
+                                 imported_count += 1
+                                 report_progress(current_row, total_lines, f"🔄 Updated: {source_term} → {target_term}")
+                             else:
+                                 skipped_count += 1
+                                 report_progress(current_row, total_lines, f"⏭️ Skipped duplicate: {source_term}")
+                             continue
+
+                         # Parse optional fields
+                         priority = self._parse_priority(
+                             self._get_field(row, column_map.get('priority', ''))
+                         )
+                         domain = self._get_field(row, column_map.get('domain', ''))
+                         notes = self._get_field(row, column_map.get('notes', ''))
+                         project = self._get_field(row, column_map.get('project', ''))
+                         client = self._get_field(row, column_map.get('client', ''))
+                         forbidden = self._parse_boolean(
+                             self._get_field(row, column_map.get('forbidden', ''))
+                         )
+
+                         # Add term to termbase (pass UUID if present, otherwise one will be generated)
+                         term_id = self.termbase_manager.add_term(
+                             termbase_id=termbase_id,
+                             source_term=source_term,
+                             target_term=target_term,
+                             priority=priority,
+                             domain=domain,
+                             notes=notes,
+                             project=project,
+                             client=client,
+                             forbidden=forbidden,
+                             term_uuid=term_uuid if term_uuid else None
+                         )
+
+                         if term_id:
+                             imported_count += 1
+                             report_progress(current_row, total_lines, f"✅ Imported: {source_term} → {target_term}")
+
+                             # Add source synonyms (already parsed from source_field above)
+                             for order, syn_part in enumerate(source_synonym_parts):
+                                 # Check for forbidden marker [!text]
+                                 forbidden = False
+                                 synonym_text = syn_part
+
+                                 if syn_part.startswith('[!') and syn_part.endswith(']'):
+                                     forbidden = True
+                                     synonym_text = syn_part[2:-1]  # Remove [! and ]
+
+                                 self.termbase_manager.add_synonym(
+                                     term_id,
+                                     synonym_text,
+                                     language='source',
+                                     display_order=order,
+                                     forbidden=forbidden
+                                 )
+
+                             # Add target synonyms (already parsed from target_field above)
+                             for order, syn_part in enumerate(target_synonym_parts):
+                                 # Check for forbidden marker [!text]
+                                 forbidden = False
+                                 synonym_text = syn_part
+
+                                 if syn_part.startswith('[!') and syn_part.endswith(']'):
+                                     forbidden = True
+                                     synonym_text = syn_part[2:-1]  # Remove [! and ]
+
+                                 self.termbase_manager.add_synonym(
+                                     term_id,
+                                     synonym_text,
+                                     language='target',
+                                     display_order=order,
+                                     forbidden=forbidden
+                                 )
+                         else:
+                             errors.append((line_num, "Failed to add term to database"))
+                             error_count += 1
+                             report_progress(current_row, total_lines, f"❌ Line {line_num}: Failed to add term")
+
+                     except Exception as e:
+                         errors.append((line_num, f"Error processing row: {str(e)}"))
+                         report_progress(current_row, total_lines, f"❌ Line {line_num}: {str(e)}")
+                         error_count += 1
+                         continue
+
+             # Generate summary message
+             message = f"Import complete: {imported_count} terms imported"
+             if skipped_count > 0:
+                 message += f", {skipped_count} duplicates skipped"
+             if error_count > 0:
+                 message += f", {error_count} errors"
+
+             return ImportResult(
+                 success=True,
+                 imported_count=imported_count,
+                 skipped_count=skipped_count,
+                 error_count=error_count,
+                 errors=errors,
+                 message=message
+             )
+
+         except Exception as e:
+             return ImportResult(
+                 success=False,
+                 imported_count=imported_count,
+                 skipped_count=skipped_count,
+                 error_count=error_count + 1,
+                 errors=errors + [(0, f"Fatal error: {str(e)}")],
+                 message=f"Import failed: {str(e)}"
+             )
+
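# Hypothetical usage sketch for import_tsv above. db_manager and
# termbase_manager are assumed to be already-constructed DatabaseManager and
# TermbaseManager instances; the callback signature (current, total, message)
# matches the method's docstring.
importer = TermbaseImporter(db_manager, termbase_manager)
result = importer.import_tsv(
    "sample_termbase.tsv",
    termbase_id=1,
    skip_duplicates=True,
    progress_callback=lambda cur, tot, msg: print(f"[{cur}/{tot}] {msg}"),
)
print(result.message)
for line_no, err in result.errors:
    print(f"  line {line_no}: {err}")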
+     def _map_columns(self, headers: List[str]) -> Dict[str, str]:
+         """
+         Map file headers to standard column names
+
+         Args:
+             headers: List of column headers from file
+
+         Returns:
+             Dictionary mapping standard names to actual column names
+         """
+         column_map = {}
+
+         for header in headers:
+             header_lower = header.lower().strip()
+
+             # Check against each standard column
+             for standard_name, variations in self.STANDARD_HEADERS.items():
+                 if header_lower in variations:
+                     column_map[standard_name] = header
+                     break
+
+         # If source/target not found, check if first two columns are language names
+         # This allows headers like "Dutch\tEnglish" or "French\tEnglish"
+         if not column_map.get('source') or not column_map.get('target'):
+             if len(headers) >= 2:
+                 first_header = headers[0].lower().strip()
+                 second_header = headers[1].lower().strip()
+
+                 # Check if both are language names
+                 first_is_lang = first_header in self.LANGUAGE_NAMES
+                 second_is_lang = second_header in self.LANGUAGE_NAMES
+
+                 if first_is_lang and second_is_lang:
+                     # Use first column as source, second as target
+                     if not column_map.get('source'):
+                         column_map['source'] = headers[0]
+                     if not column_map.get('target'):
+                         column_map['target'] = headers[1]
+                 elif first_is_lang and not column_map.get('source'):
+                     # Only first is a language - use as source
+                     column_map['source'] = headers[0]
+                 elif second_is_lang and not column_map.get('target'):
+                     # Only second is a language - use as target
+                     column_map['target'] = headers[1]
+
+         return column_map
+
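# Illustration of the mapping logic above (hypothetical headers): standard
# variations match case-insensitively, and the language-name fallback handles
# bare bilingual headers.
#
#   importer._map_columns(["Source Term", "Target Term", "Prio", "Comments"])
#   -> {'source': 'Source Term', 'target': 'Target Term',
#       'priority': 'Prio', 'notes': 'Comments'}
#
#   importer._map_columns(["Dutch", "English"])
#   -> {'source': 'Dutch', 'target': 'English'}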
+     def _get_field(self, row: Dict, column_name: str) -> str:
+         """Get field value from row, handling missing columns"""
+         # csv.DictReader yields None for fields missing from short rows,
+         # so guard before calling .strip()
+         return (row.get(column_name) or '').strip()
+
+     def _parse_priority(self, value: str) -> int:
+         """Parse priority value, default to 50 if invalid"""
+         if not value:
+             return 50
+         try:
+             priority = int(value)
+             return max(1, min(99, priority))  # Clamp to 1-99
+         except ValueError:
+             return 50
+
+     def _parse_boolean(self, value: str) -> bool:
+         """Parse boolean value from various formats"""
+         if not value:
+             return False
+         value_lower = value.lower().strip()
+         return value_lower in ['true', '1', 'yes', 'y', 'forbidden', 'prohibited']
+
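# Expected behaviour of the three helpers above, derived from the code:
#   _get_field({"Notes": "  x  "}, "Notes")  -> "x"    (stripped)
#   _get_field({}, "")                       -> ""     (unmapped/missing column)
#   _parse_priority("")                      -> 50     (default)
#   _parse_priority("150")                   -> 99     (clamped to 1-99)
#   _parse_boolean("Yes")                    -> True
#   _parse_boolean("0")                      -> False  (only "1" is truthy among digits)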
+     def _update_term_from_row(self, term_id: int, row: Dict, column_map: Dict):
+         """Update an existing term with data from row"""
+         updates = {}
+
+         if column_map.get('target'):
+             updates['target_term'] = self._get_field(row, column_map['target'])
+         if column_map.get('priority'):
+             updates['priority'] = self._parse_priority(self._get_field(row, column_map['priority']))
+         if column_map.get('domain'):
+             updates['domain'] = self._get_field(row, column_map['domain'])
+         if column_map.get('notes'):
+             updates['notes'] = self._get_field(row, column_map['notes'])
+         if column_map.get('project'):
+             updates['project'] = self._get_field(row, column_map['project'])
+         if column_map.get('client'):
+             updates['client'] = self._get_field(row, column_map['client'])
+         if column_map.get('forbidden'):
+             updates['forbidden'] = self._parse_boolean(self._get_field(row, column_map['forbidden']))
+
+         self.termbase_manager.update_term(term_id, **updates)
+
+
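# Worth noting about the update path above (hypothetical row for illustration):
# only columns present in the file are written, and the raw target field is
# stored as-is, so a pipe-joined value like "dog|hound" produced by the
# exporter below is not re-split into synonyms the way the fresh-import path
# splits it.
#
#   row = {"Source Term": "hond", "Target Term": "dog|hound"}
#   column_map = {"source": "Source Term", "target": "Target Term"}
#   # _update_term_from_row(term_id, row, column_map) ends up calling:
#   #   termbase_manager.update_term(term_id, target_term="dog|hound")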
+ class TermbaseExporter:
+     """Export termbases to TSV files"""
+
+     def __init__(self, db_manager, termbase_manager):
+         """
+         Initialize exporter
+
+         Args:
+             db_manager: DatabaseManager instance
+             termbase_manager: TermbaseManager instance
+         """
+         self.db_manager = db_manager
+         self.termbase_manager = termbase_manager
+
+     def export_tsv(self, termbase_id: int, filepath: str,
+                    include_metadata: bool = True) -> Tuple[bool, str]:
+         """
+         Export termbase to TSV file
+
+         Args:
+             termbase_id: Termbase ID to export
+             filepath: Output file path
+             include_metadata: Include all metadata fields
+
+         Returns:
+             Tuple of (success: bool, message: str)
+         """
+         try:
+             # Get all terms
+             terms = self.termbase_manager.get_terms(termbase_id)
+
+             if not terms:
+                 return (False, "Termbase is empty")
+
+             # Define columns - always include UUID for tracking
+             if include_metadata:
+                 columns = ['Term UUID', 'Source Term', 'Target Term', 'Priority', 'Domain',
+                            'Notes', 'Project', 'Client', 'Forbidden']
+             else:
+                 columns = ['Term UUID', 'Source Term', 'Target Term', 'Priority', 'Domain', 'Notes']
+
+             # Write to file with UTF-8 BOM for Excel compatibility
+             with open(filepath, 'w', encoding='utf-8-sig', newline='') as f:
+                 writer = csv.writer(f, delimiter='\t', quoting=csv.QUOTE_MINIMAL)
+
+                 # Write header
+                 writer.writerow(columns)
+
+                 # Write terms
+                 for term in terms:
+                     # Build source term: main term + synonyms (pipe-delimited)
+                     source_parts = [term.get('source_term', '')]
+                     source_synonyms = self.termbase_manager.get_synonyms(term['id'], language='source')
+                     for s in source_synonyms:
+                         if s['forbidden']:
+                             source_parts.append(f"[!{s['synonym_text']}]")
+                         else:
+                             source_parts.append(s['synonym_text'])
+                     source_text = '|'.join(source_parts)
+
+                     # Build target term: main term + synonyms (pipe-delimited)
+                     target_parts = [term.get('target_term', '')]
+                     target_synonyms = self.termbase_manager.get_synonyms(term['id'], language='target')
+                     for s in target_synonyms:
+                         if s['forbidden']:
+                             target_parts.append(f"[!{s['synonym_text']}]")
+                         else:
+                             target_parts.append(s['synonym_text'])
+                     target_text = '|'.join(target_parts)
+
+                     row = [
+                         term.get('term_uuid', ''),  # UUID first for tracking
+                         source_text,  # Main source + synonyms pipe-delimited
+                         target_text,  # Main target + synonyms pipe-delimited
+                         str(term.get('priority', 50)),
+                         term.get('domain', ''),
+                         term.get('notes', '')
+                     ]
+
+                     if include_metadata:
+                         row.extend([
+                             term.get('project', ''),
+                             term.get('client', ''),
+                             'TRUE' if term.get('forbidden', False) else 'FALSE'
+                         ])
+
+                     writer.writerow(row)
+
+             return (True, f"Exported {len(terms)} terms to {os.path.basename(filepath)}")
+
+         except Exception as e:
+             return (False, f"Export failed: {str(e)}")
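
A hedged usage sketch for the exporter, mirroring the importer example above; the manager instances are assumptions, while the message strings are the ones export_tsv actually formats. Because the export always writes the Term UUID column, re-importing an exported file with update_duplicates=True pairs each row with its original term by UUID rather than by source-term text.

# Hypothetical round-trip sketch (manager instances assumed as above).
exporter = TermbaseExporter(db_manager, termbase_manager)
ok, msg = exporter.export_tsv(termbase_id=1, filepath="backup.tsv", include_metadata=True)
print(msg)  # "Exported N terms to backup.tsv" or "Export failed: ..."

# Re-importing the same file updates terms in place via UUID matching.
if ok:
    result = TermbaseImporter(db_manager, termbase_manager).import_tsv(
        "backup.tsv", termbase_id=1, update_duplicates=True
    )
    print(result.message)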