supervertaler-1.9.153-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of supervertaler might be problematic.
- Supervertaler.py +47886 -0
- modules/__init__.py +10 -0
- modules/ai_actions.py +964 -0
- modules/ai_attachment_manager.py +343 -0
- modules/ai_file_viewer_dialog.py +210 -0
- modules/autofingers_engine.py +466 -0
- modules/cafetran_docx_handler.py +379 -0
- modules/config_manager.py +469 -0
- modules/database_manager.py +1878 -0
- modules/database_migrations.py +417 -0
- modules/dejavurtf_handler.py +779 -0
- modules/document_analyzer.py +427 -0
- modules/docx_handler.py +689 -0
- modules/encoding_repair.py +319 -0
- modules/encoding_repair_Qt.py +393 -0
- modules/encoding_repair_ui.py +481 -0
- modules/feature_manager.py +350 -0
- modules/figure_context_manager.py +340 -0
- modules/file_dialog_helper.py +148 -0
- modules/find_replace.py +164 -0
- modules/find_replace_qt.py +457 -0
- modules/glossary_manager.py +433 -0
- modules/image_extractor.py +188 -0
- modules/keyboard_shortcuts_widget.py +571 -0
- modules/llm_clients.py +1211 -0
- modules/llm_leaderboard.py +737 -0
- modules/llm_superbench_ui.py +1401 -0
- modules/local_llm_setup.py +1104 -0
- modules/model_update_dialog.py +381 -0
- modules/model_version_checker.py +373 -0
- modules/mqxliff_handler.py +638 -0
- modules/non_translatables_manager.py +743 -0
- modules/pdf_rescue_Qt.py +1822 -0
- modules/pdf_rescue_tkinter.py +909 -0
- modules/phrase_docx_handler.py +516 -0
- modules/project_home_panel.py +209 -0
- modules/prompt_assistant.py +357 -0
- modules/prompt_library.py +689 -0
- modules/prompt_library_migration.py +447 -0
- modules/quick_access_sidebar.py +282 -0
- modules/ribbon_widget.py +597 -0
- modules/sdlppx_handler.py +874 -0
- modules/setup_wizard.py +353 -0
- modules/shortcut_manager.py +932 -0
- modules/simple_segmenter.py +128 -0
- modules/spellcheck_manager.py +727 -0
- modules/statuses.py +207 -0
- modules/style_guide_manager.py +315 -0
- modules/superbench_ui.py +1319 -0
- modules/superbrowser.py +329 -0
- modules/supercleaner.py +600 -0
- modules/supercleaner_ui.py +444 -0
- modules/superdocs.py +19 -0
- modules/superdocs_viewer_qt.py +382 -0
- modules/superlookup.py +252 -0
- modules/tag_cleaner.py +260 -0
- modules/tag_manager.py +333 -0
- modules/term_extractor.py +270 -0
- modules/termbase_entry_editor.py +842 -0
- modules/termbase_import_export.py +488 -0
- modules/termbase_manager.py +1060 -0
- modules/termview_widget.py +1172 -0
- modules/theme_manager.py +499 -0
- modules/tm_editor_dialog.py +99 -0
- modules/tm_manager_qt.py +1280 -0
- modules/tm_metadata_manager.py +545 -0
- modules/tmx_editor.py +1461 -0
- modules/tmx_editor_qt.py +2784 -0
- modules/tmx_generator.py +284 -0
- modules/tracked_changes.py +900 -0
- modules/trados_docx_handler.py +430 -0
- modules/translation_memory.py +715 -0
- modules/translation_results_panel.py +2134 -0
- modules/translation_services.py +282 -0
- modules/unified_prompt_library.py +659 -0
- modules/unified_prompt_manager_qt.py +3951 -0
- modules/voice_commands.py +920 -0
- modules/voice_dictation.py +477 -0
- modules/voice_dictation_lite.py +249 -0
- supervertaler-1.9.153.dist-info/METADATA +896 -0
- supervertaler-1.9.153.dist-info/RECORD +85 -0
- supervertaler-1.9.153.dist-info/WHEEL +5 -0
- supervertaler-1.9.153.dist-info/entry_points.txt +2 -0
- supervertaler-1.9.153.dist-info/licenses/LICENSE +21 -0
- supervertaler-1.9.153.dist-info/top_level.txt +2 -0
@@ -0,0 +1,373 @@

"""
Model Version Checker for Supervertaler
========================================

Automatically checks for new LLM models from OpenAI, Anthropic, and Google.
Notifies users when new models are available and provides an easy addition interface.

Features:
- Once-per-day automatic checking (configurable)
- Manual check button
- Popup dialog showing new models
- Easy click-to-add interface
- Caches results to avoid unnecessary API calls

Usage:
    from modules.model_version_checker import ModelVersionChecker

    checker = ModelVersionChecker(cache_path="user_data/model_cache.json")
    results = checker.check_all_providers(
        openai_key="...",
        anthropic_key="...",
        google_key="..."
    )
"""

import os
import json
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from pathlib import Path


class ModelVersionChecker:
    """Check for new models from LLM providers"""

    def __init__(self, cache_path: str = None):
        """
        Initialize the model version checker

        Args:
            cache_path: Path to JSON cache file for storing last check time and known models
        """
        self.cache_path = cache_path or "model_version_cache.json"
        self.cache = self._load_cache()

        # Current known models (from llm_clients.py)
        self.known_models = {
            "openai": [
                "gpt-4o",
                "gpt-4o-mini",
                "chatgpt-4o-latest",
                "o1-preview",
                "o1-mini",
                "o3-mini",
                "gpt-4-turbo",
                "gpt-4"
            ],
            "claude": [
                "claude-sonnet-4-5-20250929",
                "claude-haiku-4-5-20251001",
                "claude-opus-4-1-20250805",
                "claude-3-5-sonnet-20241022",
                "claude-3-5-sonnet-20240620",
                "claude-3-opus-20240229",
                "claude-3-sonnet-20240229",
                "claude-3-haiku-20240307"
            ],
            "gemini": [
                "gemini-3-pro-preview",
                "gemini-2.5-pro",
                "gemini-2.5-flash",
                "gemini-2.5-flash-lite",
                "gemini-2.0-flash",
                "gemini-1.5-pro",
                "gemini-1.5-flash"
            ]
        }

    def _load_cache(self) -> dict:
        """Load cache from JSON file"""
        if os.path.exists(self.cache_path):
            try:
                with open(self.cache_path, 'r') as f:
                    return json.load(f)
            except (OSError, ValueError):
                # Unreadable or corrupt cache - fall back to a fresh default
                pass

        return {
            "last_check": None,
            "discovered_models": {
                "openai": [],
                "claude": [],
                "gemini": []
            }
        }

    def _save_cache(self):
        """Save cache to JSON file"""
        try:
            # os.makedirs('') raises FileNotFoundError, so only create parent
            # directories when the cache path actually names one
            cache_dir = os.path.dirname(self.cache_path)
            if cache_dir:
                os.makedirs(cache_dir, exist_ok=True)
            with open(self.cache_path, 'w') as f:
                json.dump(self.cache, f, indent=2)
        except Exception as e:
            print(f"Warning: Could not save model cache: {e}")

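    # Illustrative only (hypothetical values, not shipped with the package):
    # given the default structure built in _load_cache(), a populated cache
    # file would look roughly like this JSON on disk:
    #
    #   {
    #     "last_check": "2025-01-15T09:30:00.123456",
    #     "discovered_models": {
    #       "openai": ["gpt-4.1"],
    #       "claude": [],
    #       "gemini": ["gemini-2.5-flash-preview"]
    #     }
    #   }
    #
    # "last_check" holds the datetime.isoformat() string written by
    # check_all_providers(); model IDs accumulate across runs via _save_cache().
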
    def should_check(self) -> bool:
        """
        Check if we should run the version check

        Returns:
            True if more than 24 hours since last check, or never checked
        """
        if not self.cache.get("last_check"):
            return True

        last_check = datetime.fromisoformat(self.cache["last_check"])
        return datetime.now() - last_check > timedelta(hours=24)

    def check_openai_models(self, api_key: str) -> Tuple[List[str], Optional[str]]:
        """
        Check for new OpenAI models

        Args:
            api_key: OpenAI API key

        Returns:
            (list of new model IDs, error message if any)
        """
        if not api_key:
            return [], "No API key provided"

        try:
            from openai import OpenAI
            client = OpenAI(api_key=api_key)

            # List all models
            models = client.models.list()

            # Filter for GPT models (gpt-4*, gpt-5*, o1*, o3*)
            available_models = []
            for model in models.data:
                model_id = model.id
                if any(prefix in model_id.lower() for prefix in ['gpt-4', 'gpt-5', 'o1', 'o3']):
                    available_models.append(model_id)

            # Find new models not in our known list
            new_models = [m for m in available_models if m not in self.known_models["openai"]]

            return new_models, None

        except ImportError:
            return [], "OpenAI library not installed (pip install openai)"
        except Exception as e:
            return [], f"Error checking OpenAI models: {str(e)}"

    def check_claude_models(self, api_key: str) -> Tuple[List[str], Optional[str]]:
        """
        Check for new Claude models

        Note: Anthropic doesn't provide a models.list() endpoint, so we try
        to call the API with common model naming patterns and see what works.

        Args:
            api_key: Anthropic API key

        Returns:
            (list of new model IDs, error message if any)
        """
        if not api_key:
            return [], "No API key provided"

        try:
            from anthropic import Anthropic
            client = Anthropic(api_key=api_key)

            # Anthropic doesn't have a list endpoint, so we'll try common patterns
            # This is a limitation - we can only detect models we explicitly test for
            test_patterns = [
                # Claude 5 potential patterns
                "claude-sonnet-5",
                "claude-haiku-5",
                "claude-opus-5",
                # Claude 4 with newer dates
                "claude-sonnet-4-5-20260101",
                "claude-haiku-4-5-20260101",
                "claude-opus-4-5-20260101",
            ]

            new_models = []

            # Test each pattern with a minimal (1-token) API call
            for pattern in test_patterns:
                if pattern in self.known_models["claude"]:
                    continue

                try:
                    # If this call succeeds, the model exists
                    client.messages.create(
                        model=pattern,
                        max_tokens=1,
                        messages=[{"role": "user", "content": "test"}]
                    )
                    new_models.append(pattern)
                except Exception:
                    # Model doesn't exist (or a transient error occurred);
                    # either way, skip this candidate
                    continue

            return new_models, None

        except ImportError:
            return [], "Anthropic library not installed (pip install anthropic)"
        except Exception as e:
            return [], f"Error checking Claude models: {str(e)}"

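    # Hedged aside (an assumption, not part of the package): newer versions of
    # the anthropic SDK expose a model-listing endpoint, client.models.list()
    # (backed by GET /v1/models). Assuming such an SDK version, the per-pattern
    # probing above could be replaced by direct enumeration, e.g.:
    #
    #   models = client.models.list()
    #   available = [m.id for m in models.data]
    #   new_models = [m for m in available
    #                 if m not in self.known_models["claude"]]
    #
    # which would avoid spending a paid 1-token completion per candidate.
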
    def check_gemini_models(self, api_key: str) -> Tuple[List[str], Optional[str]]:
        """
        Check for new Gemini models

        Args:
            api_key: Google AI API key

        Returns:
            (list of new model IDs, error message if any)
        """
        if not api_key:
            return [], "No API key provided"

        try:
            import google.generativeai as genai
            genai.configure(api_key=api_key)

            # List all available models
            models = genai.list_models()

            # Filter for generative models only
            available_models = []
            for model in models:
                # Model name format: "models/gemini-xxx"
                if hasattr(model, 'name'):
                    model_id = model.name.replace('models/', '')
                    # Only include models that start with 'gemini'
                    if model_id.startswith('gemini'):
                        # Check if it supports generateContent
                        if hasattr(model, 'supported_generation_methods'):
                            if 'generateContent' in model.supported_generation_methods:
                                available_models.append(model_id)

            # Find new models not in our known list
            new_models = [m for m in available_models if m not in self.known_models["gemini"]]

            return new_models, None

        except ImportError:
            return [], "Google AI library not installed (pip install google-generativeai)"
        except Exception as e:
            return [], f"Error checking Gemini models: {str(e)}"

    def check_all_providers(
        self,
        openai_key: str = None,
        anthropic_key: str = None,
        google_key: str = None,
        force: bool = False
    ) -> Dict[str, Dict]:
        """
        Check all providers for new models

        Args:
            openai_key: OpenAI API key
            anthropic_key: Anthropic API key
            google_key: Google AI API key
            force: Force check even if checked recently

        Returns:
            Dictionary with results per provider:
            {
                "openai": {"new_models": [...], "error": None},
                "claude": {"new_models": [...], "error": None},
                "gemini": {"new_models": [...], "error": None},
                "checked": True
            }
        """
        # Check if we should run the check
        if not force and not self.should_check():
            return {
                "openai": {"new_models": [], "error": None},
                "claude": {"new_models": [], "error": None},
                "gemini": {"new_models": [], "error": None},
                "checked": False,
                "message": "Already checked in the last 24 hours"
            }

        results = {}

        # Check OpenAI
        if openai_key:
            new_models, error = self.check_openai_models(openai_key)
            results["openai"] = {"new_models": new_models, "error": error}
        else:
            results["openai"] = {"new_models": [], "error": "No API key"}

        # Check Claude
        if anthropic_key:
            new_models, error = self.check_claude_models(anthropic_key)
            results["claude"] = {"new_models": new_models, "error": error}
        else:
            results["claude"] = {"new_models": [], "error": "No API key"}

        # Check Gemini
        if google_key:
            new_models, error = self.check_gemini_models(google_key)
            results["gemini"] = {"new_models": new_models, "error": error}
        else:
            results["gemini"] = {"new_models": [], "error": "No API key"}

        # Update cache
        self.cache["last_check"] = datetime.now().isoformat()
        for provider, result in results.items():
            if result["new_models"] and not result["error"]:
                # Add newly discovered models to cache
                existing = set(self.cache["discovered_models"].get(provider, []))
                existing.update(result["new_models"])
                self.cache["discovered_models"][provider] = list(existing)

        self._save_cache()

        results["checked"] = True
        return results

    def has_new_models(self, results: Dict) -> bool:
        """
        Check if any new models were found

        Args:
            results: Results from check_all_providers()

        Returns:
            True if any new models found
        """
        if not results.get("checked"):
            return False

        for provider in ["openai", "claude", "gemini"]:
            if results.get(provider, {}).get("new_models"):
                return True

        return False

    def get_cache_info(self) -> Dict:
        """Get information about the cache"""
        return {
            "last_check": self.cache.get("last_check"),
            "discovered_models": self.cache.get("discovered_models", {})
        }


# Standalone test
if __name__ == "__main__":
    checker = ModelVersionChecker()

    # Test with dummy keys (will fail but shows structure)
    results = checker.check_all_providers(
        openai_key="dummy",
        anthropic_key="dummy",
        google_key="dummy",
        force=True
    )

    print("Check results:")
    print(json.dumps(results, indent=2))
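
A minimal integration sketch (illustrative, not from the package): wiring the
checker into application startup, with API keys read from conventional
environment variable names that the package itself does not prescribe.

    import os
    from modules.model_version_checker import ModelVersionChecker

    checker = ModelVersionChecker(cache_path="user_data/model_cache.json")
    # check_all_providers() returns checked=False if it already ran within
    # the last 24 hours, unless force=True is passed
    results = checker.check_all_providers(
        openai_key=os.environ.get("OPENAI_API_KEY"),
        anthropic_key=os.environ.get("ANTHROPIC_API_KEY"),
        google_key=os.environ.get("GOOGLE_API_KEY"),
    )

    if checker.has_new_models(results):
        for provider in ("openai", "claude", "gemini"):
            for model_id in results[provider]["new_models"]:
                print(f"New {provider} model available: {model_id}")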