supervertaler-1.9.153-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of supervertaler might be problematic.

Files changed (85)
  1. Supervertaler.py +47886 -0
  2. modules/__init__.py +10 -0
  3. modules/ai_actions.py +964 -0
  4. modules/ai_attachment_manager.py +343 -0
  5. modules/ai_file_viewer_dialog.py +210 -0
  6. modules/autofingers_engine.py +466 -0
  7. modules/cafetran_docx_handler.py +379 -0
  8. modules/config_manager.py +469 -0
  9. modules/database_manager.py +1878 -0
  10. modules/database_migrations.py +417 -0
  11. modules/dejavurtf_handler.py +779 -0
  12. modules/document_analyzer.py +427 -0
  13. modules/docx_handler.py +689 -0
  14. modules/encoding_repair.py +319 -0
  15. modules/encoding_repair_Qt.py +393 -0
  16. modules/encoding_repair_ui.py +481 -0
  17. modules/feature_manager.py +350 -0
  18. modules/figure_context_manager.py +340 -0
  19. modules/file_dialog_helper.py +148 -0
  20. modules/find_replace.py +164 -0
  21. modules/find_replace_qt.py +457 -0
  22. modules/glossary_manager.py +433 -0
  23. modules/image_extractor.py +188 -0
  24. modules/keyboard_shortcuts_widget.py +571 -0
  25. modules/llm_clients.py +1211 -0
  26. modules/llm_leaderboard.py +737 -0
  27. modules/llm_superbench_ui.py +1401 -0
  28. modules/local_llm_setup.py +1104 -0
  29. modules/model_update_dialog.py +381 -0
  30. modules/model_version_checker.py +373 -0
  31. modules/mqxliff_handler.py +638 -0
  32. modules/non_translatables_manager.py +743 -0
  33. modules/pdf_rescue_Qt.py +1822 -0
  34. modules/pdf_rescue_tkinter.py +909 -0
  35. modules/phrase_docx_handler.py +516 -0
  36. modules/project_home_panel.py +209 -0
  37. modules/prompt_assistant.py +357 -0
  38. modules/prompt_library.py +689 -0
  39. modules/prompt_library_migration.py +447 -0
  40. modules/quick_access_sidebar.py +282 -0
  41. modules/ribbon_widget.py +597 -0
  42. modules/sdlppx_handler.py +874 -0
  43. modules/setup_wizard.py +353 -0
  44. modules/shortcut_manager.py +932 -0
  45. modules/simple_segmenter.py +128 -0
  46. modules/spellcheck_manager.py +727 -0
  47. modules/statuses.py +207 -0
  48. modules/style_guide_manager.py +315 -0
  49. modules/superbench_ui.py +1319 -0
  50. modules/superbrowser.py +329 -0
  51. modules/supercleaner.py +600 -0
  52. modules/supercleaner_ui.py +444 -0
  53. modules/superdocs.py +19 -0
  54. modules/superdocs_viewer_qt.py +382 -0
  55. modules/superlookup.py +252 -0
  56. modules/tag_cleaner.py +260 -0
  57. modules/tag_manager.py +333 -0
  58. modules/term_extractor.py +270 -0
  59. modules/termbase_entry_editor.py +842 -0
  60. modules/termbase_import_export.py +488 -0
  61. modules/termbase_manager.py +1060 -0
  62. modules/termview_widget.py +1172 -0
  63. modules/theme_manager.py +499 -0
  64. modules/tm_editor_dialog.py +99 -0
  65. modules/tm_manager_qt.py +1280 -0
  66. modules/tm_metadata_manager.py +545 -0
  67. modules/tmx_editor.py +1461 -0
  68. modules/tmx_editor_qt.py +2784 -0
  69. modules/tmx_generator.py +284 -0
  70. modules/tracked_changes.py +900 -0
  71. modules/trados_docx_handler.py +430 -0
  72. modules/translation_memory.py +715 -0
  73. modules/translation_results_panel.py +2134 -0
  74. modules/translation_services.py +282 -0
  75. modules/unified_prompt_library.py +659 -0
  76. modules/unified_prompt_manager_qt.py +3951 -0
  77. modules/voice_commands.py +920 -0
  78. modules/voice_dictation.py +477 -0
  79. modules/voice_dictation_lite.py +249 -0
  80. supervertaler-1.9.153.dist-info/METADATA +896 -0
  81. supervertaler-1.9.153.dist-info/RECORD +85 -0
  82. supervertaler-1.9.153.dist-info/WHEEL +5 -0
  83. supervertaler-1.9.153.dist-info/entry_points.txt +2 -0
  84. supervertaler-1.9.153.dist-info/licenses/LICENSE +21 -0
  85. supervertaler-1.9.153.dist-info/top_level.txt +2 -0
modules/local_llm_setup.py: @@ -0,0 +1,1104 @@
"""
Local LLM Setup Module for Supervertaler
=========================================

Provides setup wizard, status checking, and model management for local LLM
integration via Ollama.

Features:
- Ollama installation detection and guidance
- Hardware detection (RAM, GPU) for model recommendations
- Model download and management
- Connection testing

Usage:
    from modules.local_llm_setup import LocalLLMSetupDialog, check_ollama_status

    # Check if Ollama is running
    status = check_ollama_status()
    if status['running']:
        print(f"Ollama running with models: {status['models']}")

    # Show setup wizard
    dialog = LocalLLMSetupDialog(parent)
    dialog.exec()

Author: Supervertaler Team
Date: December 2025
"""

import os
import sys
import subprocess
import webbrowser
from typing import Dict, List, Optional, Tuple, Callable
from dataclasses import dataclass

from PyQt6.QtWidgets import (
    QDialog, QVBoxLayout, QHBoxLayout, QLabel, QPushButton,
    QGroupBox, QComboBox, QProgressBar, QTextEdit, QWidget,
    QMessageBox, QFrame, QSizePolicy, QApplication
)
from PyQt6.QtCore import Qt, QThread, pyqtSignal, QTimer
from PyQt6.QtGui import QFont


# =============================================================================
# CONSTANTS
# =============================================================================

DEFAULT_OLLAMA_ENDPOINT = "http://localhost:11434"

# Recommended models with metadata
RECOMMENDED_MODELS = {
    "qwen2.5:3b": {
        "name": "Qwen 2.5 3B",
        "description": "Fast & lightweight - good for simple translations",
        "size_gb": 2.0,
        "ram_required_gb": 4,
        "quality_stars": 3,
        "strengths": ["Fast", "Low memory", "Multilingual"],
        "use_case": "Quick drafts, simple text, low-end hardware",
        "download_size": "1.9 GB"
    },
    "qwen2.5:7b": {
        "name": "Qwen 2.5 7B",
        "description": "Recommended - excellent multilingual quality",
        "size_gb": 4.4,
        "ram_required_gb": 8,
        "quality_stars": 4,
        "strengths": ["Excellent multilingual", "Good quality", "Balanced speed"],
        "use_case": "General translation, most European languages",
        "download_size": "4.4 GB",
        "recommended": True
    },
    "llama3.2:3b": {
        "name": "Llama 3.2 3B",
        "description": "Meta's efficient model - good English",
        "size_gb": 2.0,
        "ram_required_gb": 4,
        "quality_stars": 3,
        "strengths": ["Fast", "Good English", "Efficient"],
        "use_case": "English-centric translations, quick drafts",
        "download_size": "2.0 GB"
    },
    "mistral:7b": {
        "name": "Mistral 7B",
        "description": "Strong European language support",
        "size_gb": 4.1,
        "ram_required_gb": 8,
        "quality_stars": 4,
        "strengths": ["European languages", "French", "Fast inference"],
        "use_case": "French, German, Spanish translations",
        "download_size": "4.1 GB"
    },
    "gemma2:9b": {
        "name": "Gemma 2 9B",
        "description": "Google's quality model - best for size",
        "size_gb": 5.5,
        "ram_required_gb": 10,
        "quality_stars": 5,
        "strengths": ["High quality", "Good reasoning", "Multilingual"],
        "use_case": "Quality-focused translation, technical content",
        "download_size": "5.4 GB"
    },
    "qwen2.5:14b": {
        "name": "Qwen 2.5 14B",
        "description": "Premium quality - needs 16GB+ RAM",
        "size_gb": 9.0,
        "ram_required_gb": 16,
        "quality_stars": 5,
        "strengths": ["Excellent quality", "Complex text", "Nuanced translation"],
        "use_case": "Premium translations, complex documents",
        "download_size": "8.9 GB"
    },
    "llama3.1:8b": {
        "name": "Llama 3.1 8B",
        "description": "Meta's capable model - good all-rounder",
        "size_gb": 4.7,
        "ram_required_gb": 8,
        "quality_stars": 4,
        "strengths": ["Versatile", "Good quality", "Well-tested"],
        "use_case": "General purpose translation",
        "download_size": "4.7 GB"
    },
    # === LARGE MODELS (32GB+ RAM required) ===
    "qwen2.5:32b": {
        "name": "Qwen 2.5 32B",
        "description": "Alibaba's flagship model - excellent for translation",
        "size_gb": 19.9,
        "ram_required_gb": 32,
        "quality_stars": 5,
        "strengths": ["Top translation quality", "Excellent multilingual", "Nuanced output"],
        "use_case": "High-quality professional translation",
        "download_size": "19.9 GB"
    },
    "qwen2.5:72b": {
        "name": "Qwen 2.5 72B",
        "description": "Alibaba's largest model - best quality, very slow on CPU",
        "size_gb": 43.0,
        "ram_required_gb": 48,
        "quality_stars": 5,
        "strengths": ["Highest quality", "Best multilingual", "Near cloud-level"],
        "use_case": "Maximum quality (needs 48GB+ RAM, very slow)",
        "download_size": "43 GB"
    },
    "mixtral:8x7b": {
        "name": "Mixtral 8x7B",
        "description": "Mistral's mixture-of-experts model - very capable",
        "size_gb": 26.4,
        "ram_required_gb": 32,
        "quality_stars": 5,
        "strengths": ["Excellent reasoning", "Strong multilingual", "Efficient inference"],
        "use_case": "High-quality translation with fast inference",
        "download_size": "26.4 GB"
    },
    "command-r:35b": {
        "name": "Command R 35B",
        "description": "Cohere's RAG-optimized model - excellent for context",
        "size_gb": 20.0,
        "ram_required_gb": 40,
        "quality_stars": 5,
        "strengths": ["Context-aware", "Strong reasoning", "Good for technical text"],
        "use_case": "Technical and specialized translation",
        "download_size": "20.0 GB"
    },
    # === DUTCH/MULTILINGUAL SPECIALISTS ===
    "aya-expanse:8b": {
        "name": "Aya Expanse 8B",
        "description": "Cohere's multilingual model - excellent for Dutch",
        "size_gb": 4.8,
        "ram_required_gb": 8,
        "quality_stars": 5,
        "strengths": ["Top Dutch support", "High fidelity translation", "23 languages"],
        "use_case": "Dutch-English translation (Top Pick)",
        "download_size": "4.8 GB"
    },
    "openeurollm-dutch": {
        "name": "OpenEuroLLM Dutch 9B",
        "description": "Gemma3 fine-tuned for Dutch - strong grammar",
        "size_gb": 5.4,
        "ram_required_gb": 8,
        "quality_stars": 4,
        "strengths": ["Dutch grammar/idioms", "European tech docs", "Fine-tuned"],
        "use_case": "Dutch technical documentation",
        "download_size": "5.4 GB"
    },
    "geitje-7b-ultra": {
        "name": "GEITje 7B Ultra",
        "description": "Dutch-specialized Mistral - conversational",
        "size_gb": 4.1,
        "ram_required_gb": 6,
        "quality_stars": 4,
        "strengths": ["Dutch-specialized", "Adaptable prompts", "Fast"],
        "use_case": "Dutch conversational translation",
        "download_size": "4.1 GB"
    },
    "stablelm2:12b": {
        "name": "StableLM 2 12B",
        "description": "Multilingual training including Dutch",
        "size_gb": 7.3,
        "ram_required_gb": 10,
        "quality_stars": 4,
        "strengths": ["Multilingual", "Stable output", "Good for summaries"],
        "use_case": "General multilingual tasks",
        "download_size": "7.3 GB"
    }
}

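# Illustrative sketch (hypothetical helper, not part of the packaged module):
# because the table above is a plain dict, callers can query it directly.
# get_model_recommendations() further down is the module's real API for this.
def models_fitting_ram(ram_gb: float) -> List[str]:
    """Return IDs of recommended models whose RAM requirement fits ram_gb."""
    return [
        model_id
        for model_id, info in RECOMMENDED_MODELS.items()
        if info["ram_required_gb"] <= ram_gb
    ]
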
# =============================================================================
# UTILITY FUNCTIONS
# =============================================================================

def get_ollama_endpoint() -> str:
    """Get Ollama endpoint from environment or return default."""
    return os.environ.get('OLLAMA_ENDPOINT', DEFAULT_OLLAMA_ENDPOINT)


def check_ollama_status(endpoint: str = None) -> Dict:
    """
    Check if Ollama is running and get available models.

    Args:
        endpoint: Ollama API endpoint (default: http://localhost:11434)

    Returns:
        Dict with:
        - running: bool - whether Ollama is running
        - models: list - available model names
        - version: str - Ollama version if available
        - error: str - error message if not running
    """
    try:
        import requests
    except ImportError:
        return {
            'running': False,
            'models': [],
            'version': None,
            'endpoint': endpoint or get_ollama_endpoint(),
            'error': "Requests library not installed"
        }

    endpoint = endpoint or get_ollama_endpoint()

    try:
        # Check if Ollama is running by getting model list
        response = requests.get(f"{endpoint}/api/tags", timeout=5)
        if response.status_code == 200:
            data = response.json()
            models = [m['name'] for m in data.get('models', [])]

            # Try to get version
            version = None
            try:
                ver_response = requests.get(f"{endpoint}/api/version", timeout=2)
                if ver_response.status_code == 200:
                    version = ver_response.json().get('version')
            except:
                pass

            return {
                'running': True,
                'models': models,
                'version': version,
                'endpoint': endpoint,
                'error': None
            }
        else:
            return {
                'running': False,
                'models': [],
                'version': None,
                'endpoint': endpoint,
                'error': f"Ollama returned status {response.status_code}"
            }
    except requests.exceptions.ConnectionError:
        return {
            'running': False,
            'models': [],
            'version': None,
            'endpoint': endpoint,
            'error': "Cannot connect to Ollama. Is it installed and running?"
        }
    except Exception as e:
        return {
            'running': False,
            'models': [],
            'version': None,
            'endpoint': endpoint,
            'error': str(e)
        }

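# Illustrative sketch (hypothetical helper, not part of the packaged module):
# check_ollama_status() is a one-shot synchronous probe, so a caller that has
# just launched Ollama might poll until the service answers.
def wait_for_ollama(timeout_s: float = 30.0, interval_s: float = 2.0) -> bool:
    """Poll check_ollama_status() until Ollama responds or timeout_s elapses."""
    import time
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if check_ollama_status()['running']:
            return True
        time.sleep(interval_s)
    return False
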
295
+ def detect_system_specs() -> Dict:
296
+ """
297
+ Detect system hardware specifications.
298
+
299
+ Returns:
300
+ Dict with:
301
+ - ram_gb: Total RAM in GB
302
+ - gpu_name: GPU name if detected
303
+ - gpu_vram_gb: GPU VRAM in GB if detected
304
+ - os_name: Operating system name
305
+ - recommended_model: Suggested model based on specs
306
+ """
307
+ import platform
308
+
309
+ specs = {
310
+ 'ram_gb': 8, # Default assumption
311
+ 'gpu_name': None,
312
+ 'gpu_vram_gb': None,
313
+ 'os_name': platform.system(),
314
+ 'recommended_model': 'qwen2.5:7b'
315
+ }
316
+
317
+ # Detect RAM
318
+ try:
319
+ import psutil
320
+ specs['ram_gb'] = round(psutil.virtual_memory().total / (1024**3), 1)
321
+ except ImportError:
322
+ # Try Windows-specific method
323
+ if platform.system() == 'Windows':
324
+ try:
325
+ import ctypes
326
+ kernel32 = ctypes.windll.kernel32
327
+ c_ulonglong = ctypes.c_ulonglong
328
+
329
+ class MEMORYSTATUSEX(ctypes.Structure):
330
+ _fields_ = [
331
+ ('dwLength', ctypes.c_ulong),
332
+ ('dwMemoryLoad', ctypes.c_ulong),
333
+ ('ullTotalPhys', c_ulonglong),
334
+ ('ullAvailPhys', c_ulonglong),
335
+ ('ullTotalPageFile', c_ulonglong),
336
+ ('ullAvailPageFile', c_ulonglong),
337
+ ('ullTotalVirtual', c_ulonglong),
338
+ ('ullAvailVirtual', c_ulonglong),
339
+ ('ullAvailExtendedVirtual', c_ulonglong),
340
+ ]
341
+
342
+ stat = MEMORYSTATUSEX()
343
+ stat.dwLength = ctypes.sizeof(stat)
344
+ kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
345
+ specs['ram_gb'] = round(stat.ullTotalPhys / (1024**3), 1)
346
+ except:
347
+ pass
348
+
349
+ # Detect GPU (basic detection)
350
+ try:
351
+ if platform.system() == 'Windows':
352
+ # Try to detect NVIDIA GPU
353
+ result = subprocess.run(
354
+ ['nvidia-smi', '--query-gpu=name,memory.total', '--format=csv,noheader'],
355
+ capture_output=True, text=True, timeout=5
356
+ )
357
+ if result.returncode == 0:
358
+ parts = result.stdout.strip().split(', ')
359
+ if len(parts) >= 2:
360
+ specs['gpu_name'] = parts[0]
361
+ # Parse VRAM (e.g., "8192 MiB" -> 8)
362
+ vram_str = parts[1].replace(' MiB', '').replace(' MB', '')
363
+ try:
364
+ specs['gpu_vram_gb'] = round(int(vram_str) / 1024, 1)
365
+ except:
366
+ pass
367
+ except:
368
+ pass
369
+
370
+ # Recommend model based on RAM
371
+ ram = specs['ram_gb']
372
+ if ram >= 16:
373
+ specs['recommended_model'] = 'qwen2.5:14b'
374
+ elif ram >= 10:
375
+ specs['recommended_model'] = 'gemma2:9b'
376
+ elif ram >= 8:
377
+ specs['recommended_model'] = 'qwen2.5:7b'
378
+ elif ram >= 4:
379
+ specs['recommended_model'] = 'qwen2.5:3b'
380
+ else:
381
+ specs['recommended_model'] = 'llama3.2:3b'
382
+
383
+ return specs
384
+
385
+
386
+ def get_model_recommendations(ram_gb: float) -> List[Dict]:
387
+ """
388
+ Get model recommendations based on available RAM.
389
+
390
+ Args:
391
+ ram_gb: Available RAM in gigabytes
392
+
393
+ Returns:
394
+ List of model dicts sorted by recommendation priority
395
+ """
396
+ compatible = []
397
+
398
+ for model_id, info in RECOMMENDED_MODELS.items():
399
+ if info['ram_required_gb'] <= ram_gb:
400
+ model_info = info.copy()
401
+ model_info['id'] = model_id
402
+ model_info['compatible'] = True
403
+ compatible.append(model_info)
404
+ else:
405
+ model_info = info.copy()
406
+ model_info['id'] = model_id
407
+ model_info['compatible'] = False
408
+ compatible.append(model_info)
409
+
410
+ # Sort: compatible first, then by quality (descending), then by size (ascending)
411
+ compatible.sort(key=lambda x: (
412
+ not x['compatible'],
413
+ -x['quality_stars'],
414
+ x['ram_required_gb']
415
+ ))
416
+
417
+ return compatible
418
+
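# Illustrative sketch (hypothetical helper, not part of the packaged module):
# the sort key above orders compatible models first, higher star rating first,
# smaller RAM footprint breaking ties. On an 8 GB machine that puts
# aya-expanse:8b (5 stars) ahead of the 4-star models, with models needing
# more than 8 GB flagged compatible=False at the bottom.
def print_recommendation_order(ram_gb: float = 8.0) -> None:
    """Dump the recommendation order produced for a given RAM budget."""
    for m in get_model_recommendations(ram_gb):
        print(f"{m['id']:<20} {m['quality_stars']}* compatible={m['compatible']}")
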
# =============================================================================
# BACKGROUND WORKERS
# =============================================================================

class ModelDownloadWorker(QThread):
    """Background worker for downloading Ollama models."""

    progress = pyqtSignal(str)  # Progress message
    finished = pyqtSignal(bool, str)  # Success, message

    def __init__(self, model_name: str, endpoint: str = None):
        super().__init__()
        self.model_name = model_name
        self.endpoint = endpoint or get_ollama_endpoint()
        self._cancelled = False

    def cancel(self):
        self._cancelled = True

    def run(self):
        """Download model using Ollama API."""
        try:
            import requests

            self.progress.emit(f"Starting download of {self.model_name}...")

            # Use streaming pull endpoint
            response = requests.post(
                f"{self.endpoint}/api/pull",
                json={"name": self.model_name, "stream": True},
                stream=True,
                timeout=3600  # 1 hour timeout for large models
            )

            if response.status_code != 200:
                self.finished.emit(False, f"Download failed: HTTP {response.status_code}")
                return

            last_status = ""
            for line in response.iter_lines():
                if self._cancelled:
                    self.finished.emit(False, "Download cancelled")
                    return

                if line:
                    try:
                        import json
                        data = json.loads(line)
                        status = data.get('status', '')

                        # Show progress
                        if 'completed' in data and 'total' in data:
                            pct = int(data['completed'] / data['total'] * 100)
                            completed_mb = data['completed'] / (1024 * 1024)
                            total_mb = data['total'] / (1024 * 1024)
                            self.progress.emit(f"{status}: {completed_mb:.0f} MB / {total_mb:.0f} MB ({pct}%)")
                        elif status != last_status:
                            self.progress.emit(status)
                            last_status = status
                    except:
                        pass

            self.finished.emit(True, f"Successfully downloaded {self.model_name}")

        except requests.exceptions.ConnectionError:
            self.finished.emit(False, "Cannot connect to Ollama. Is it running?")
        except Exception as e:
            self.finished.emit(False, f"Download error: {str(e)}")


class ConnectionTestWorker(QThread):
    """Background worker for testing Ollama connection with a simple prompt."""

    progress = pyqtSignal(str)
    finished = pyqtSignal(bool, str, str)  # Success, message, response

    def __init__(self, model_name: str, endpoint: str = None):
        super().__init__()
        self.model_name = model_name
        self.endpoint = endpoint or get_ollama_endpoint()

    def run(self):
        """Test model with a simple translation prompt."""
        try:
            import requests

            self.progress.emit(f"Loading model {self.model_name}...")
            self.progress.emit("(First load may take 30-60 seconds)")

            # Simple test prompt
            test_prompt = "Translate to Dutch: Hello, how are you today?"

            response = requests.post(
                f"{self.endpoint}/api/chat",
                json={
                    "model": self.model_name,
                    "messages": [{"role": "user", "content": test_prompt}],
                    "stream": False,
                    "options": {"temperature": 0.3, "num_predict": 50}
                },
                timeout=180  # 3 min for model loading
            )

            if response.status_code == 200:
                result = response.json()
                translation = result.get('message', {}).get('content', 'No response')
                self.finished.emit(True, "Model is working!", translation.strip())
            else:
                self.finished.emit(False, f"Model test failed: HTTP {response.status_code}", "")

        except requests.exceptions.Timeout:
            self.finished.emit(False, "Test timed out. Model may still be loading.", "")
        except requests.exceptions.ConnectionError:
            self.finished.emit(False, "Cannot connect to Ollama", "")
        except Exception as e:
            self.finished.emit(False, f"Test error: {str(e)}", "")

# =============================================================================
# SETUP DIALOG
# =============================================================================

class LocalLLMSetupDialog(QDialog):
    """
    Setup wizard for local LLM configuration.

    Guides users through:
    1. Checking if Ollama is installed and running
    2. Detecting hardware specs
    3. Recommending and downloading a model
    4. Testing the connection
    """

    def __init__(self, parent=None, log_callback: Callable = None):
        super().__init__(parent)
        self.log = log_callback or print
        self.download_worker = None
        self.test_worker = None

        self.setWindowTitle("Local LLM Setup")
        self.setMinimumWidth(600)
        self.setMinimumHeight(500)

        self.init_ui()
        self.refresh_status()

    def init_ui(self):
        """Initialize the UI."""
        layout = QVBoxLayout(self)
        layout.setSpacing(15)

        # Header
        header = QLabel("🖥️ Local LLM Setup")
        header.setStyleSheet("font-size: 18pt; font-weight: bold; color: #1976D2;")
        layout.addWidget(header)

        desc = QLabel(
            "Run AI translation locally on your computer - no API keys needed, "
            "complete privacy, works offline."
        )
        desc.setWordWrap(True)
        desc.setStyleSheet("color: #666; padding: 5px; background-color: #E3F2FD; border-radius: 5px;")
        layout.addWidget(desc)

        # === STEP 1: Ollama Status ===
        self.ollama_group = QGroupBox("Step 1: Ollama Status")
        ollama_layout = QVBoxLayout()

        self.status_label = QLabel("Checking Ollama status...")
        self.status_label.setStyleSheet("padding: 10px;")
        ollama_layout.addWidget(self.status_label)

        btn_row = QHBoxLayout()
        self.install_btn = QPushButton("📥 Download Ollama")
        self.install_btn.clicked.connect(self.open_ollama_download)
        self.install_btn.setToolTip("Opens Ollama download page in your browser")
        btn_row.addWidget(self.install_btn)

        self.start_btn = QPushButton("▶️ Start Ollama")
        self.start_btn.clicked.connect(self.start_ollama)
        self.start_btn.setToolTip("Start the Ollama service on your computer")
        btn_row.addWidget(self.start_btn)

        self.refresh_btn = QPushButton("🔄 Refresh Status")
        self.refresh_btn.clicked.connect(self.refresh_status)
        btn_row.addWidget(self.refresh_btn)

        btn_row.addStretch()
        ollama_layout.addLayout(btn_row)

        self.ollama_group.setLayout(ollama_layout)
        layout.addWidget(self.ollama_group)

        # === STEP 2: Hardware & Model Selection ===
        self.model_group = QGroupBox("Step 2: Select Model")
        model_layout = QVBoxLayout()

        self.specs_label = QLabel("Detecting hardware...")
        model_layout.addWidget(self.specs_label)

        model_row = QHBoxLayout()
        model_row.addWidget(QLabel("Model:"))

        self.model_combo = QComboBox()
        self.model_combo.setMinimumWidth(350)
        self.model_combo.currentIndexChanged.connect(self.on_model_selected)
        model_row.addWidget(self.model_combo)
        model_row.addStretch()
        model_layout.addLayout(model_row)

        self.model_info_label = QLabel("")
        self.model_info_label.setWordWrap(True)
        self.model_info_label.setStyleSheet("color: #666; padding: 5px;")
        model_layout.addWidget(self.model_info_label)

        # Download button and progress
        download_row = QHBoxLayout()
        self.download_btn = QPushButton("📦 Download Selected Model")
        self.download_btn.clicked.connect(self.download_model)
        download_row.addWidget(self.download_btn)

        self.cancel_btn = QPushButton("Cancel")
        self.cancel_btn.clicked.connect(self.cancel_download)
        self.cancel_btn.setVisible(False)
        download_row.addWidget(self.cancel_btn)

        download_row.addStretch()
        model_layout.addLayout(download_row)

        self.progress_bar = QProgressBar()
        self.progress_bar.setVisible(False)
        self.progress_bar.setRange(0, 0)  # Indeterminate
        model_layout.addWidget(self.progress_bar)

        self.progress_label = QLabel("")
        self.progress_label.setStyleSheet("color: #666;")
        model_layout.addWidget(self.progress_label)

        self.model_group.setLayout(model_layout)
        layout.addWidget(self.model_group)

        # === STEP 3: Test Connection ===
        self.test_group = QGroupBox("Step 3: Test Connection")
        test_layout = QVBoxLayout()

        self.test_btn = QPushButton("🧪 Test Translation")
        self.test_btn.clicked.connect(self.test_connection)
        self.test_btn.setToolTip("Send a test translation to verify everything works")
        test_layout.addWidget(self.test_btn)

        self.test_result = QTextEdit()
        self.test_result.setMaximumHeight(100)
        self.test_result.setReadOnly(True)
        self.test_result.setPlaceholderText("Test results will appear here...")
        test_layout.addWidget(self.test_result)

        self.test_group.setLayout(test_layout)
        layout.addWidget(self.test_group)

        # === Bottom buttons ===
        btn_layout = QHBoxLayout()
        btn_layout.addStretch()

        self.close_btn = QPushButton("Close")
        self.close_btn.clicked.connect(self.accept)
        btn_layout.addWidget(self.close_btn)

        layout.addLayout(btn_layout)

        # Initial state
        self.model_group.setEnabled(False)
        self.test_group.setEnabled(False)

    def refresh_status(self):
        """Refresh Ollama status and hardware specs."""
        # Check Ollama
        status = check_ollama_status()

        if status['running']:
            models_str = ", ".join(status['models'][:5]) if status['models'] else "None"
            if len(status['models']) > 5:
                models_str += f" (+{len(status['models']) - 5} more)"

            version_str = f" v{status['version']}" if status['version'] else ""

            self.status_label.setText(
                f"✅ Ollama is running{version_str}\n"
                f"📍 Endpoint: {status['endpoint']}\n"
                f"📦 Installed models: {models_str}"
            )
            self.status_label.setStyleSheet("padding: 10px; background-color: #E8F5E9; border-radius: 5px;")
            self.install_btn.setVisible(False)
            self.model_group.setEnabled(True)
            self.test_group.setEnabled(True)

            # Store installed models for later
            self.installed_models = status['models']
        else:
            self.status_label.setText(
                f"❌ Ollama is not running\n"
                f"Error: {status['error']}\n\n"
                "Please install and start Ollama to use local LLM translation."
            )
            self.status_label.setStyleSheet("padding: 10px; background-color: #FFEBEE; border-radius: 5px;")
            self.install_btn.setVisible(True)
            self.model_group.setEnabled(False)
            self.test_group.setEnabled(False)
            self.installed_models = []

        # Detect hardware
        specs = detect_system_specs()
        gpu_str = f", GPU: {specs['gpu_name']} ({specs['gpu_vram_gb']}GB)" if specs['gpu_name'] else ""
        self.specs_label.setText(
            f"💻 Your system: {specs['ram_gb']:.0f} GB RAM{gpu_str}"
        )

        # Populate model combo
        self.model_combo.clear()
        recommendations = get_model_recommendations(specs['ram_gb'])

        for model in recommendations:
            stars = "★" * model['quality_stars'] + "☆" * (5 - model['quality_stars'])

            # Mark if already installed
            installed_marker = " ✓" if model['id'] in self.installed_models else ""

            # Mark if not compatible
            if not model['compatible']:
                label = f"⚠️ {model['name']} ({model['download_size']}) - Needs {model['ram_required_gb']}GB RAM"
            elif model.get('recommended'):
                label = f"⭐ {model['name']} ({model['download_size']}) {stars}{installed_marker} - RECOMMENDED"
            else:
                label = f"{model['name']} ({model['download_size']}) {stars}{installed_marker}"

            self.model_combo.addItem(label, model['id'])

        # Select recommended model
        for i in range(self.model_combo.count()):
            model_id = self.model_combo.itemData(i)
            if model_id == specs['recommended_model']:
                self.model_combo.setCurrentIndex(i)
                break

    def on_model_selected(self, index):
        """Update model info when selection changes."""
        if index < 0:
            return

        model_id = self.model_combo.itemData(index)
        if model_id and model_id in RECOMMENDED_MODELS:
            info = RECOMMENDED_MODELS[model_id]
            self.model_info_label.setText(
                f"<b>{info['description']}</b><br>"
                f"Best for: {info['use_case']}<br>"
                f"Strengths: {', '.join(info['strengths'])}"
            )

            # Update download button
            if model_id in getattr(self, 'installed_models', []):
                self.download_btn.setText("✓ Already Installed")
                self.download_btn.setEnabled(False)
            else:
                self.download_btn.setText(f"📦 Download {info['name']} ({info['download_size']})")
                self.download_btn.setEnabled(True)

    def open_ollama_download(self):
        """Open Ollama download page."""
        webbrowser.open("https://ollama.com/download")
        QMessageBox.information(
            self,
            "Install Ollama",
            "The Ollama download page has been opened in your browser.\n\n"
            "After installation:\n"
            "1. Ollama should start automatically\n"
            "2. Click 'Refresh Status' to check\n"
            "3. If not running, open a terminal and run: ollama serve"
        )

    def start_ollama(self):
        """Start the Ollama service."""
        import subprocess
        import time

        # Common locations for Ollama on Windows
        ollama_paths = [
            os.path.expandvars(r"%LOCALAPPDATA%\Programs\Ollama\ollama.exe"),
            os.path.expandvars(r"%APPDATA%\Microsoft\Windows\Start Menu\Programs\Ollama\Ollama.lnk"),
            r"C:\Program Files\Ollama\ollama.exe",
        ]

        # Also check for the Start Menu shortcut
        start_menu_lnk = os.path.expandvars(
            r"%APPDATA%\Microsoft\Windows\Start Menu\Programs\Ollama\Ollama.lnk"
        )

        started = False

        # Try the Start Menu shortcut first (most reliable on Windows)
        if os.path.exists(start_menu_lnk):
            try:
                os.startfile(start_menu_lnk)
                started = True
                self.status_label.setText("⏳ Starting Ollama... please wait")
                self.status_label.setStyleSheet("background-color: #FFF3CD; padding: 10px;")
                QApplication.processEvents()
            except Exception as e:
                print(f"Failed to start via shortcut: {e}")

        # Try direct executable paths
        if not started:
            for path in ollama_paths:
                if os.path.exists(path) and path.endswith('.exe'):
                    try:
                        subprocess.Popen(
                            [path, "serve"],
                            creationflags=subprocess.CREATE_NO_WINDOW if hasattr(subprocess, 'CREATE_NO_WINDOW') else 0
                        )
                        started = True
                        self.status_label.setText("⏳ Starting Ollama... please wait")
                        self.status_label.setStyleSheet("background-color: #FFF3CD; padding: 10px;")
                        QApplication.processEvents()
                        break
                    except Exception as e:
                        print(f"Failed to start {path}: {e}")

        if started:
            # Wait a moment for Ollama to start, then refresh
            QTimer.singleShot(3000, self.refresh_status)  # Check after 3 seconds
            QMessageBox.information(
                self,
                "Starting Ollama",
                "Ollama is starting...\n\n"
                "The status will refresh automatically in a few seconds.\n"
                "If it doesn't start, try opening Ollama from your Start Menu."
            )
        else:
            QMessageBox.warning(
                self,
                "Ollama Not Found",
                "Could not find Ollama installation.\n\n"
                "Please either:\n"
                "1. Open Ollama from your Start Menu manually\n"
                "2. Download and install Ollama from https://ollama.com\n\n"
                "After starting Ollama, click 'Refresh Status'."
            )

    def download_model(self):
        """Start downloading the selected model."""
        model_id = self.model_combo.currentData()
        if not model_id:
            return

        self.progress_bar.setVisible(True)
        self.progress_label.setText("Starting download...")
        self.download_btn.setEnabled(False)
        self.cancel_btn.setVisible(True)
        self.model_combo.setEnabled(False)

        self.download_worker = ModelDownloadWorker(model_id)
        self.download_worker.progress.connect(self.on_download_progress)
        self.download_worker.finished.connect(self.on_download_finished)
        self.download_worker.start()

    def cancel_download(self):
        """Cancel ongoing download."""
        if self.download_worker:
            self.download_worker.cancel()

    def on_download_progress(self, message):
        """Update download progress."""
        self.progress_label.setText(message)

    def on_download_finished(self, success, message):
        """Handle download completion."""
        self.progress_bar.setVisible(False)
        self.cancel_btn.setVisible(False)
        self.model_combo.setEnabled(True)

        if success:
            self.progress_label.setText(f"✅ {message}")
            self.progress_label.setStyleSheet("color: green;")
            self.refresh_status()  # Refresh to show new model
        else:
            self.progress_label.setText(f"❌ {message}")
            self.progress_label.setStyleSheet("color: red;")
            self.download_btn.setEnabled(True)

        self.download_worker = None

    def test_connection(self):
        """Test the selected model with a simple translation."""
        model_id = self.model_combo.currentData()
        if not model_id:
            return

        # Check if model is installed
        if model_id not in getattr(self, 'installed_models', []):
            self.test_result.setText("⚠️ Please download the model first")
            return

        self.test_btn.setEnabled(False)
        self.test_result.setText("🔄 Testing... (first load may take 30-60 seconds)")

        self.test_worker = ConnectionTestWorker(model_id)
        self.test_worker.progress.connect(lambda msg: self.test_result.setText(f"🔄 {msg}"))
        self.test_worker.finished.connect(self.on_test_finished)
        self.test_worker.start()

    def on_test_finished(self, success, message, response):
        """Handle test completion."""
        self.test_btn.setEnabled(True)

        if success:
            self.test_result.setText(
                f"✅ {message}\n\n"
                f"Test prompt: \"Translate to Dutch: Hello, how are you today?\"\n"
                f"Response: {response}"
            )
            self.test_result.setStyleSheet("background-color: #E8F5E9;")
        else:
            self.test_result.setText(f"❌ {message}")
            self.test_result.setStyleSheet("background-color: #FFEBEE;")

        self.test_worker = None

    def closeEvent(self, event):
        """Handle dialog close - cancel any running workers."""
        if self.download_worker and self.download_worker.isRunning():
            self.download_worker.cancel()
            self.download_worker.wait()

        if self.test_worker and self.test_worker.isRunning():
            self.test_worker.wait()

        super().closeEvent(event)

# =============================================================================
# COMPACT STATUS WIDGET (for embedding in settings)
# =============================================================================

class LocalLLMStatusWidget(QWidget):
    """
    Compact status widget for embedding in settings panel.
    Shows Ollama status and provides quick access to setup.
    """

    model_changed = pyqtSignal(str)  # Emitted when user selects a model

    def __init__(self, parent=None):
        super().__init__(parent)
        self.init_ui()
        self.refresh_status()

    def init_ui(self):
        """Initialize the compact UI."""
        layout = QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.setSpacing(5)

        # Status row
        status_row = QHBoxLayout()

        self.status_icon = QLabel("⏳")
        status_row.addWidget(self.status_icon)

        self.status_text = QLabel("Checking...")
        status_row.addWidget(self.status_text, 1)

        self.setup_btn = QPushButton("Setup...")
        self.setup_btn.setMaximumWidth(80)
        self.setup_btn.clicked.connect(self.show_setup_dialog)
        status_row.addWidget(self.setup_btn)

        layout.addLayout(status_row)

        # Model selection row
        model_row = QHBoxLayout()
        model_row.addWidget(QLabel("Model:"))

        self.model_combo = QComboBox()
        self.model_combo.setMinimumWidth(250)
        self.model_combo.currentIndexChanged.connect(self.on_model_changed)
        model_row.addWidget(self.model_combo, 1)

        self.refresh_btn = QPushButton("🔄")
        self.refresh_btn.setMaximumWidth(40)
        self.refresh_btn.setToolTip("Refresh status")
        self.refresh_btn.clicked.connect(self.refresh_status)
        model_row.addWidget(self.refresh_btn)

        layout.addLayout(model_row)

        # Info label
        self.info_label = QLabel("")
        self.info_label.setStyleSheet("color: #666; font-size: 9pt;")
        self.info_label.setWordWrap(True)
        layout.addWidget(self.info_label)

    def refresh_status(self):
        """Refresh Ollama status."""
        status = check_ollama_status()

        if status['running']:
            self.status_icon.setText("✅")
            self.status_text.setText(f"Ollama running ({len(status['models'])} models)")
            self.status_text.setStyleSheet("color: green;")
            self.model_combo.setEnabled(True)

            # Populate model combo with installed models
            current = self.model_combo.currentData()
            self.model_combo.clear()

            for model in status['models']:
                # Get friendly name if available
                base_model = model.split(':')[0] + ':' + model.split(':')[1] if ':' in model else model
                info = RECOMMENDED_MODELS.get(base_model, {})
                name = info.get('name', model)
                stars = "★" * info.get('quality_stars', 3) if info else ""

                self.model_combo.addItem(f"{name} {stars}", model)

            # Restore selection
            if current:
                for i in range(self.model_combo.count()):
                    if self.model_combo.itemData(i) == current:
                        self.model_combo.setCurrentIndex(i)
                        break

            self.info_label.setText("🔒 Local LLM: No API costs, complete privacy, works offline")
        else:
            self.status_icon.setText("❌")
            self.status_text.setText("Ollama not running")
            self.status_text.setStyleSheet("color: red;")
            self.model_combo.setEnabled(False)
            self.model_combo.clear()
            self.info_label.setText("Click 'Setup...' to install and configure Ollama")

    def on_model_changed(self, index):
        """Emit signal when model selection changes."""
        if index >= 0:
            model_id = self.model_combo.itemData(index)
            if model_id:
                self.model_changed.emit(model_id)

    def show_setup_dialog(self):
        """Show the full setup dialog."""
        dialog = LocalLLMSetupDialog(self)
        dialog.exec()
        self.refresh_status()

    def get_selected_model(self) -> Optional[str]:
        """Get currently selected model ID."""
        return self.model_combo.currentData()

    def set_selected_model(self, model_id: str):
        """Set the selected model."""
        for i in range(self.model_combo.count()):
            if self.model_combo.itemData(i) == model_id:
                self.model_combo.setCurrentIndex(i)
                return

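# Illustrative sketch (hypothetical wiring, not part of the packaged module):
# a host settings page might embed the compact widget and persist the chosen
# model. `load_model_pref` and `save_model_pref` stand in for the host's own
# configuration layer.
def embed_status_widget_example(settings_page, settings_layout,
                                load_model_pref, save_model_pref):
    """Embed LocalLLMStatusWidget in a layout and persist model selection."""
    widget = LocalLLMStatusWidget(parent=settings_page)
    saved = load_model_pref()
    if saved:
        widget.set_selected_model(saved)
    widget.model_changed.connect(save_model_pref)
    settings_layout.addWidget(widget)
    return widget
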
# =============================================================================
# STANDALONE TEST
# =============================================================================

if __name__ == "__main__":
    app = QApplication(sys.argv)

    # Test status check
    print("Checking Ollama status...")
    status = check_ollama_status()
    print(f"Running: {status['running']}")
    print(f"Models: {status['models']}")
    print(f"Error: {status['error']}")

    # Test hardware detection
    print("\nDetecting hardware...")
    specs = detect_system_specs()
    print(f"RAM: {specs['ram_gb']} GB")
    print(f"GPU: {specs['gpu_name']}")
    print(f"Recommended model: {specs['recommended_model']}")

    # Show dialog
    dialog = LocalLLMSetupDialog()
    dialog.exec()