supervertaler-1.9.181-py3-none-any.whl → supervertaler-1.9.183-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Supervertaler.py +558 -364
- modules/extract_tm.py +518 -0
- modules/project_tm.py +320 -0
- modules/termbase_manager.py +0 -1
- modules/termview_widget.py +12 -11
- modules/translation_memory.py +3 -12
- modules/translation_results_panel.py +0 -7
- {supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/METADATA +1 -1
- {supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/RECORD +13 -11
- {supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/WHEEL +0 -0
- {supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/entry_points.txt +0 -0
- {supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/licenses/LICENSE +0 -0
- {supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/top_level.txt +0 -0
modules/project_tm.py
ADDED
@@ -0,0 +1,320 @@
+"""
+ProjectTM - In-memory TM for instant grid lookups (Total Recall architecture)
+
+This module implements a lightweight in-memory Translation Memory that extracts
+relevant segments from the full TM database on project load. This makes grid
+navigation instant while keeping the full TM for concordance searches.
+
+Inspired by CafeTran's "Total Recall" feature.
+"""
+
+import sqlite3
+import threading
+from difflib import SequenceMatcher
+from typing import Dict, List, Optional, Callable
+import re
+
+
+class ProjectTM:
+    """
+    Lightweight in-memory TM extracted from the main TM database.
+
+    On project load, extracts segments that are relevant to the current project
+    (fuzzy matches above threshold) into an in-memory SQLite database for
+    instant lookups during grid navigation.
+
+    Usage:
+        project_tm = ProjectTM()
+        project_tm.extract_from_database(
+            db_manager,
+            project_segments,
+            tm_ids=['tm1', 'tm2'],
+            threshold=0.75,
+            progress_callback=lambda cur, total: print(f"{cur}/{total}")
+        )
+
+        # Fast lookup during grid navigation
+        matches = project_tm.search("source text to translate")
+    """
+
+    def __init__(self):
+        """Initialize in-memory SQLite database for ProjectTM"""
+        self.conn = sqlite3.connect(":memory:", check_same_thread=False)
+        self.conn.row_factory = sqlite3.Row
+        self.lock = threading.Lock()
+        self.is_built = False
+        self.segment_count = 0
+
+        # Create the schema
+        self._create_schema()
+
+    def _create_schema(self):
+        """Create the in-memory database schema"""
+        with self.lock:
+            cursor = self.conn.cursor()
+            cursor.execute("""
+                CREATE TABLE IF NOT EXISTS segments (
+                    id INTEGER PRIMARY KEY AUTOINCREMENT,
+                    source_text TEXT NOT NULL,
+                    target_text TEXT NOT NULL,
+                    source_lower TEXT NOT NULL,
+                    tm_id TEXT,
+                    tm_name TEXT,
+                    similarity REAL,
+                    original_id INTEGER
+                )
+            """)
+            # Index for fast exact match lookups
+            cursor.execute("CREATE INDEX IF NOT EXISTS idx_source_lower ON segments(source_lower)")
+            # FTS5 for fuzzy text search
+            cursor.execute("""
+                CREATE VIRTUAL TABLE IF NOT EXISTS segments_fts USING fts5(
+                    source_text,
+                    content=segments,
+                    content_rowid=id
+                )
+            """)
+            self.conn.commit()
+
+    def clear(self):
+        """Clear all segments from the ProjectTM"""
+        with self.lock:
+            cursor = self.conn.cursor()
+            cursor.execute("DELETE FROM segments")
+            cursor.execute("DELETE FROM segments_fts")
+            self.conn.commit()
+            self.is_built = False
+            self.segment_count = 0
+
+    def extract_from_database(
+        self,
+        db_manager,
+        project_segments: List,
+        tm_ids: List[str] = None,
+        source_lang: str = None,
+        target_lang: str = None,
+        threshold: float = 0.75,
+        progress_callback: Optional[Callable[[int, int], None]] = None,
+        log_callback: Optional[Callable[[str], None]] = None
+    ) -> int:
+        """
+        Extract relevant segments from the main TM database into ProjectTM.
+
+        For each unique source text in the project, searches the TM for fuzzy
+        matches above the threshold and stores them in memory.
+
+        Args:
+            db_manager: The main database manager with TM data
+            project_segments: List of project segments to find matches for
+            tm_ids: List of TM IDs to search (None = all active TMs)
+            source_lang: Source language filter
+            target_lang: Target language filter
+            threshold: Minimum similarity threshold (0.0-1.0)
+            progress_callback: Optional callback(current, total) for progress
+            log_callback: Optional callback(message) for logging
+
+        Returns:
+            Number of TM segments extracted
+        """
+        def log(msg):
+            if log_callback:
+                log_callback(msg)
+            else:
+                print(msg)
+
+        self.clear()
+
+        if not project_segments or not db_manager:
+            log(f"[ProjectTM] Early exit: segments={bool(project_segments)}, db={bool(db_manager)}")
+            return 0
+
+        # Get unique source texts from project
+        unique_sources = {}
+        for seg in project_segments:
+            # Try both 'source' and 'source_text' attributes (different segment types use different names)
+            source = getattr(seg, 'source', None) or getattr(seg, 'source_text', None)
+            if source and source.strip():
+                # Normalize: strip and lowercase for deduplication
+                key = source.strip().lower()
+                if key not in unique_sources:
+                    unique_sources[key] = source.strip()
+
+        total = len(unique_sources)
+        log(f"[ProjectTM] Found {total} unique source texts from {len(project_segments)} segments")
+        if total == 0:
+            return 0
+
+        extracted_count = 0
+        seen_sources = set()  # Deduplicate TM entries
+
+        cursor = self.conn.cursor()
+
+        log(f"[ProjectTM] Searching TMs: {tm_ids}, threshold={threshold}, langs={source_lang}->{target_lang}")
+
+        for i, (key, source_text) in enumerate(unique_sources.items()):
+            if progress_callback and i % 10 == 0:
+                progress_callback(i, total)
+
+            try:
+                # Search main TM database for fuzzy matches
+                matches = db_manager.search_fuzzy_matches(
+                    source_text,
+                    tm_ids=tm_ids,
+                    threshold=threshold,
+                    max_results=10,  # Keep top 10 matches per source
+                    source_lang=source_lang,
+                    target_lang=target_lang,
+                    bidirectional=True
+                )
+
+                # Debug: log first search
+                if i == 0:
+                    log(f"[ProjectTM] First search '{source_text[:50]}...' returned {len(matches)} matches")
+
+                for match in matches:
+                    match_source = match.get('source_text', '')
+                    match_target = match.get('target_text', '')
+
+                    if not match_source or not match_target:
+                        continue
+
+                    # Deduplicate by source text
+                    source_key = match_source.strip().lower()
+                    if source_key in seen_sources:
+                        continue
+                    seen_sources.add(source_key)
+
+                    # Insert into ProjectTM
+                    cursor.execute("""
+                        INSERT INTO segments (source_text, target_text, source_lower,
+                                              tm_id, tm_name, similarity, original_id)
+                        VALUES (?, ?, ?, ?, ?, ?, ?)
+                    """, (
+                        match_source,
+                        match_target,
+                        source_key,
+                        match.get('tm_id'),
+                        match.get('tm_name', 'Unknown TM'),
+                        match.get('similarity', 0),
+                        match.get('id')
+                    ))
+                    extracted_count += 1
+
+            except Exception as e:
+                # Log but continue - don't fail extraction for one bad segment
+                pass
+
+        # Commit all inserts
+        self.conn.commit()
+
+        # Rebuild FTS5 index
+        try:
+            cursor.execute("INSERT INTO segments_fts(segments_fts) VALUES('rebuild')")
+            self.conn.commit()
+        except Exception:
+            pass  # FTS rebuild may fail if no data, that's OK
+
+        if progress_callback:
+            progress_callback(total, total)
+
+        self.is_built = True
+        self.segment_count = extracted_count
+
+        return extracted_count
+
+    def search(self, source_text: str, max_results: int = 5) -> List[Dict]:
+        """
+        Search ProjectTM for matches (instant lookup).
+
+        First checks for exact matches, then falls back to fuzzy search.
+
+        Args:
+            source_text: Source text to search for
+            max_results: Maximum number of results to return
+
+        Returns:
+            List of match dictionaries with source_text, target_text, similarity, etc.
+        """
+        if not self.is_built or not source_text:
+            return []
+
+        source_lower = source_text.strip().lower()
+        results = []
+
+        with self.lock:
+            cursor = self.conn.cursor()
+
+            # 1. Check for exact match first (fastest)
+            cursor.execute("""
+                SELECT * FROM segments WHERE source_lower = ? LIMIT 1
+            """, (source_lower,))
+            exact = cursor.fetchone()
+
+            if exact:
+                results.append({
+                    'source_text': exact['source_text'],
+                    'target_text': exact['target_text'],
+                    'tm_id': exact['tm_id'],
+                    'tm_name': exact['tm_name'],
+                    'similarity': 1.0,  # Exact match
+                    'match_pct': 100,
+                    'id': exact['original_id']
+                })
+                return results  # Exact match - no need to search further
+
+            # 2. FTS5 fuzzy search
+            try:
+                # Tokenize query for FTS5
+                clean_text = re.sub(r'[^\w\s]', ' ', source_text)
+                search_terms = [t for t in clean_text.split() if len(t) > 2]
+
+                if search_terms:
+                    fts_query = ' OR '.join(f'"{term}"' for term in search_terms[:10])
+
+                    cursor.execute("""
+                        SELECT s.*, bm25(segments_fts) as rank
+                        FROM segments s
+                        JOIN segments_fts ON s.id = segments_fts.rowid
+                        WHERE segments_fts MATCH ?
+                        ORDER BY rank
+                        LIMIT ?
+                    """, (fts_query, max_results * 3))  # Get more candidates for re-ranking
+
+                    candidates = cursor.fetchall()
+
+                    # Re-rank by actual similarity
+                    for row in candidates:
+                        similarity = self._calculate_similarity(source_text, row['source_text'])
+                        if similarity >= 0.5:  # Lower threshold for ProjectTM (pre-filtered)
+                            results.append({
+                                'source_text': row['source_text'],
+                                'target_text': row['target_text'],
+                                'tm_id': row['tm_id'],
+                                'tm_name': row['tm_name'],
+                                'similarity': similarity,
+                                'match_pct': int(similarity * 100),
+                                'id': row['original_id']
+                            })
+
+                    # Sort by similarity and limit
+                    results.sort(key=lambda x: x['similarity'], reverse=True)
+                    results = results[:max_results]
+
+            except Exception:
+                pass  # FTS search may fail, return what we have
+
+            return results
+
+    def _calculate_similarity(self, text1: str, text2: str) -> float:
+        """Calculate similarity ratio between two texts"""
+        # Strip HTML/XML tags for comparison
+        clean1 = re.sub(r'<[^>]+>', '', text1).lower()
+        clean2 = re.sub(r'<[^>]+>', '', text2).lower()
+        return SequenceMatcher(None, clean1, clean2).ratio()
+
+    def get_stats(self) -> Dict:
+        """Get statistics about the ProjectTM"""
+        return {
+            'is_built': self.is_built,
+            'segment_count': self.segment_count
+        }
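The new module is used in two phases: a one-time extraction pass against the full TM database on project load, then instant per-segment lookups. A minimal sketch of that flow, using a hypothetical StubDBManager and Seg class in place of Supervertaler's real database manager and segment objects (only the search_fuzzy_matches keyword signature that extract_from_database calls above is assumed):

    from modules.project_tm import ProjectTM

    class StubDBManager:
        """Hypothetical stand-in for the app's db_manager."""
        def search_fuzzy_matches(self, source_text, tm_ids=None, threshold=0.75,
                                 max_results=10, source_lang=None, target_lang=None,
                                 bidirectional=True):
            # Pretend the full TM holds one relevant pair.
            return [{'source_text': 'The cat sat on the mat.',
                     'target_text': 'De kat zat op de mat.',
                     'tm_id': 'tm1', 'tm_name': 'Demo TM',
                     'similarity': 0.9, 'id': 1}]

    class Seg:
        """Hypothetical project segment; ProjectTM reads .source or .source_text."""
        def __init__(self, source):
            self.source = source

    project_tm = ProjectTM()
    count = project_tm.extract_from_database(
        StubDBManager(), [Seg("The cat sat on the mat.")],
        log_callback=lambda msg: None)  # silence extraction logging
    print(count)                        # 1 segment extracted

    print(project_tm.search("The cat sat on the mat."))  # exact hit via source_lower index
    print(project_tm.search("A cat sat on a mat."))      # FTS5 candidates, SequenceMatcher re-rank

The second search misses the exact-match index, goes through the FTS5 candidate query, and is re-ranked by SequenceMatcher, so it returns the same pair with a similarity below 1.0.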
modules/termbase_manager.py
CHANGED
@@ -609,7 +609,6 @@ class TermbaseManager:
             """, (project_id,))
 
             active_ids = [row[0] for row in cursor.fetchall()]
-            self.log(f"📋 Found {len(active_ids)} active termbases for project {project_id}: {active_ids}")
             return active_ids
         except Exception as e:
             self.log(f"✗ Error getting active termbase IDs: {e}")
modules/termview_widget.py
CHANGED
@@ -734,15 +734,9 @@ class TermviewWidget(QWidget):
         has_termbase = termbase_matches and len(termbase_matches) > 0
         has_nt = nt_matches and len(nt_matches) > 0
 
-
-
-
-            self.info_label.setText("No glossaries activated. Go to Resources → Glossary to activate.")
-        elif status_hint == 'wrong_language':
-            self.info_label.setText("Activated glossaries don't match project language pair.")
-        else:
-            self.info_label.setText("No terminology or NT matches for this segment")
-            return
+        # Store status hint for info label (will be set at the end)
+        self._status_hint = status_hint
+        self._has_any_matches = has_termbase or has_nt
 
         # Convert termbase matches to dict for easy lookup: {source_term.lower(): [translations]}
         matches_dict = {}
@@ -874,11 +868,18 @@
             info_parts.append(f"{blocks_with_translations} terms")
         if blocks_with_nt > 0:
             info_parts.append(f"{blocks_with_nt} NTs")
-
+
         if info_parts:
             self.info_label.setText(f"✓ Found {', '.join(info_parts)} in {len(tokens)} words")
         else:
-
+            # Show appropriate message based on status hint when no matches
+            status_hint = getattr(self, '_status_hint', None)
+            if status_hint == 'no_termbases_activated':
+                self.info_label.setText(f"No glossaries activated ({len(tokens)} words)")
+            elif status_hint == 'wrong_language':
+                self.info_label.setText(f"Glossaries don't match language pair ({len(tokens)} words)")
+            else:
+                self.info_label.setText(f"No matches in {len(tokens)} words")
 
     def get_all_termbase_matches(self, text: str) -> Dict[str, List[Dict]]:
         """
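The net effect of this refactor is that the info label always reports the token count, and the reason for an empty result is decided after matching instead of short-circuiting with an early return. A Qt-free sketch of that deferred-status pattern (function and argument names hypothetical; message strings taken from the diff):

    def termview_status(tokens, termbase_matches, nt_matches, status_hint=None):
        """Decide the info-label text once, at the end, as the patched widget does."""
        info_parts = []
        if termbase_matches:
            info_parts.append(f"{len(termbase_matches)} terms")
        if nt_matches:
            info_parts.append(f"{len(nt_matches)} NTs")

        if info_parts:
            return f"✓ Found {', '.join(info_parts)} in {len(tokens)} words"
        # No matches: fall back on the stored hint instead of returning early.
        if status_hint == 'no_termbases_activated':
            return f"No glossaries activated ({len(tokens)} words)"
        if status_hint == 'wrong_language':
            return f"Glossaries don't match language pair ({len(tokens)} words)"
        return f"No matches in {len(tokens)} words"

    print(termview_status(["the", "cat"], [], [], 'no_termbases_activated'))
    # -> No glossaries activated (2 words)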
modules/translation_memory.py
CHANGED
@@ -205,20 +205,14 @@ class TMDatabase:
         Returns:
             List of match dictionaries sorted by similarity
         """
-        print(f"[DEBUG] TMDatabase.search_all: source='{source[:50]}...', tm_ids={tm_ids}")
-
         # Determine which TMs to search
         # If tm_ids is None or empty, search ALL TMs (don't filter by tm_id)
         if tm_ids is None and enabled_only:
             tm_ids = [tm_id for tm_id, meta in self.tm_metadata.items() if meta.get('enabled', True)]
-
-
+
         # If tm_ids is still empty, set to None to search ALL TMs
         if tm_ids is not None and len(tm_ids) == 0:
             tm_ids = None
-            print(f"[DEBUG] TMDatabase.search_all: Empty tm_ids, setting to None to search ALL")
-
-        print(f"[DEBUG] TMDatabase.search_all: Final tm_ids to search: {tm_ids}")
 
         # First try exact match
         exact_match = self.db.get_exact_match(
@@ -227,8 +221,7 @@
             source_lang=self.source_lang,
             target_lang=self.target_lang
         )
-
-
+
         if exact_match:
             # Format as match dictionary
             return [{
@@ -241,7 +234,6 @@
             }]
 
         # Try fuzzy matches
-        print(f"[DEBUG] TMDatabase.search_all: Calling fuzzy search with source_lang={self.source_lang}, target_lang={self.target_lang}")
         fuzzy_matches = self.db.search_fuzzy_matches(
             source=source,
             tm_ids=tm_ids,
@@ -250,8 +242,7 @@
             source_lang=self.source_lang,
             target_lang=self.target_lang
         )
-
-
+
         # Format matches for UI
         formatted_matches = []
         for match in fuzzy_matches:
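With the debug prints gone, the tm_ids normalization that remains in search_all is easy to miss: None plus enabled_only expands to the TMs flagged enabled, and an empty selection collapses back to None, which means search all TMs. A standalone sketch of just that rule (the helper name is hypothetical; the two conditionals mirror the diff):

    def resolve_tm_ids(tm_ids, tm_metadata, enabled_only=True):
        # None + enabled_only: restrict to TMs flagged enabled in metadata.
        if tm_ids is None and enabled_only:
            tm_ids = [tm_id for tm_id, meta in tm_metadata.items()
                      if meta.get('enabled', True)]
        # An empty selection means "no filter": search ALL TMs.
        if tm_ids is not None and len(tm_ids) == 0:
            tm_ids = None
        return tm_ids

    meta = {'tm1': {'enabled': True}, 'tm2': {'enabled': False}}
    print(resolve_tm_ids(None, meta))     # ['tm1']
    print(resolve_tm_ids([], meta))       # None -> search all TMs
    print(resolve_tm_ids(['tm2'], meta))  # ['tm2'] passes through unchanged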
modules/translation_results_panel.py
CHANGED
@@ -1676,13 +1676,6 @@ class TranslationResultsPanel(QWidget):
         Args:
             matches_dict: Dict with keys like "NT", "MT", "TM", "Termbases"
         """
-        print(f"🎯 TranslationResultsPanel.set_matches() called with matches_dict keys: {list(matches_dict.keys())}")
-        for match_type, matches in matches_dict.items():
-            print(f"  {match_type}: {len(matches)} matches")
-            if match_type == "Termbases" and matches:
-                for i, match in enumerate(matches[:2]):  # Show first 2 for debugging
-                    print(f"    [{i}] {match.source} → {match.target}")
-
         # Ensure CompactMatchItem has current theme_manager
         if self.theme_manager:
             CompactMatchItem.theme_manager = self.theme_manager
{supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: supervertaler
-Version: 1.9.181
+Version: 1.9.183
 Summary: Professional AI-enhanced translation workbench with multi-LLM support, glossary system, TM, spellcheck, voice commands, and PyQt6 interface. Batteries included (core).
 Home-page: https://supervertaler.com
 Author: Michael Beijer
{supervertaler-1.9.181.dist-info → supervertaler-1.9.183.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-Supervertaler.py,sha256=
+Supervertaler.py,sha256=8_tSjEorQseT5osmoDfk6WoeoCOmtfPhFjQcwRPsQfk,2338474
 modules/__init__.py,sha256=G58XleS-EJ2sX4Kehm-3N2m618_W2Es0Kg8CW_eBG7g,327
 modules/ai_actions.py,sha256=i5MJcM-7Y6CAvKUwxmxrVHeoZAVtAP7aRDdWM5KLkO0,33877
 modules/ai_attachment_manager.py,sha256=juZlrW3UPkIkcnj0SREgOQkQROLf0fcu3ShZcKXMxsI,11361
@@ -14,6 +14,7 @@ modules/docx_handler.py,sha256=jSlZs5tollJnsnIA80buEXLfZBunp_GQ9lCtFZPUnBs,34053
 modules/encoding_repair.py,sha256=mrQSOMW-tvaowLja_gtXDJv3Qw4YhLCLsuZtBS0BVms,11900
 modules/encoding_repair_Qt.py,sha256=UosUOpMnXf0gkgp_F_4wszd25Op_3X_4h9kWMomK0Sk,17533
 modules/encoding_repair_ui.py,sha256=bRnyFg-5_Lz-bZoNT5eivM5LKsQgpoxbllsffYW3fAg,18619
+modules/extract_tm.py,sha256=ix58ti1_Zkd2dxIx1PwN8rWxmYHGv2zsUPrT-VfcMwA,18228
 modules/feature_manager.py,sha256=toQnOjeCPMTWp3sas-J88-ZAomFdoeRLKPc19C-6XFo,12416
 modules/figure_context_manager.py,sha256=hy7h5PcvN8qr8nQtDS41YvTwr6gecWvmjzMBP43_LM8,12682
 modules/file_dialog_helper.py,sha256=0dthKAFBkoQkdGm_zsYQMImn31Z9Bc9-VKNV5idZtkQ,3767
@@ -34,6 +35,7 @@ modules/pdf_rescue_Qt.py,sha256=9W_M0Zms4miapQbrqm-viHNCpaW39GL9VaKKFCJxpnE,8047
 modules/pdf_rescue_tkinter.py,sha256=a4R_OUnn7X5O_XMR1roybrdu1aXoGCwwO-mwYB2ZpOg,39606
 modules/phrase_docx_handler.py,sha256=7vJNbvxxURzdcinZ3rkqyJ-7Y5O1NpVL4Lvu9NuGFjQ,18598
 modules/project_home_panel.py,sha256=P0PgMnoPp6WEiGrfq8cNJNEdxO83aHQDdXzRLqF173w,6810
+modules/project_tm.py,sha256=TQUc9ApZjfiKZlA4bc1PrHwtEtk_XM9XArCd53De_20,12327
 modules/prompt_assistant.py,sha256=shkZqNTvyQKNDO_9aFEu1_gN0zQq0fR5krXkWfnTR2Y,13150
 modules/prompt_library.py,sha256=t5w4cqB6_Sin4BQDVNALKpfB1EN_oaDeHFwlHxILLSY,26894
 modules/prompt_library_migration.py,sha256=fv3RHhe2-EnH50XW5tyTWy0YP_KJ2EsESuTxR8klfmI,17639
@@ -58,8 +60,8 @@ modules/tag_manager.py,sha256=g66S0JSxdguN9AhWzZG3hsIz87Ul51wQ3c2wOCTZVSk,12789
 modules/term_extractor.py,sha256=qPvKNCVXFTGEGwXNvvC0cfCmdb5c3WhzE38EOgKdKUI,11253
 modules/termbase_entry_editor.py,sha256=iWO9CgLjMomGAqBXDsGAX7TFJvDOp2s_taS4gBL1rZY,35818
 modules/termbase_import_export.py,sha256=16IAY04IS_rgt0GH5UOUzUI5NoqAli4JMfMquxmFBm0,23552
-modules/termbase_manager.py,sha256=
-modules/termview_widget.py,sha256=
+modules/termbase_manager.py,sha256=XAVrz-wt8jKcjoD6ocHoXewY5PN0A0GeqFEctsv0jS8,48697
+modules/termview_widget.py,sha256=zK0vYBJZpvD4flg-W5QAdCY6EGONjkWHYf-C_Kuofo0,54037
 modules/theme_manager.py,sha256=Qk_jfCmfm7fjdMAOyBHpD18w3MiRfWBZk0cHTw6yAAg,18639
 modules/tm_editor_dialog.py,sha256=AzGwq4QW641uFJdF8DljLTRRp4FLoYX3Pe4rlTjQWNg,3517
 modules/tm_manager_qt.py,sha256=h2bvXkRuboHf_RRz9-5FX35GVRlpXgRDWeXyj1QWtPs,54406
@@ -69,17 +71,17 @@ modules/tmx_editor_qt.py,sha256=PxBIUw_06PHYTBHsd8hZzVJXW8T0A0ljfz1Wjjsa4yU,1170
 modules/tmx_generator.py,sha256=pNkxwdMLvSRMMru0lkB1gvViIpg9BQy1EVhRbwoef3k,9426
 modules/tracked_changes.py,sha256=S_BIEC6r7wVAwjG42aSy_RgH4KaMAC8GS5thEvqrYdE,39480
 modules/trados_docx_handler.py,sha256=VPRAQ73cUHs_SEj6x81z1PmSxfjnwPBp9P4fXeK3KpQ,16363
-modules/translation_memory.py,sha256=
-modules/translation_results_panel.py,sha256=
+modules/translation_memory.py,sha256=LnG8csZNL2GTHXT4zk0uecJEtvRc-MKwv7Pt7EX3s7s,28002
+modules/translation_results_panel.py,sha256=OWqzV9xmJOi8NGCi3h42nq-qE7-v6WStjQWRsghCVbQ,92044
 modules/translation_services.py,sha256=lyVpWuZK1wtVtYZMDMdLoq1DHBoSaeAnp-Yejb0TlVQ,10530
 modules/unified_prompt_library.py,sha256=96u4WlMwnmmhD4uNJHZ-qVQj8v9_8dA2AVCWpBcwTrg,26006
 modules/unified_prompt_manager_qt.py,sha256=U89UFGG-M7BLetoaLAlma0x-n8SIyx682DhSvaRnzJs,171285
 modules/voice_commands.py,sha256=iBb-gjWxRMLhFH7-InSRjYJz1EIDBNA2Pog8V7TtJaY,38516
 modules/voice_dictation.py,sha256=QmitXfkG-vRt5hIQATjphHdhXfqmwhzcQcbXB6aRzIg,16386
 modules/voice_dictation_lite.py,sha256=jorY0BmWE-8VczbtGrWwt1zbnOctMoSlWOsQrcufBcc,9423
-supervertaler-1.9.
-supervertaler-1.9.
-supervertaler-1.9.
-supervertaler-1.9.
-supervertaler-1.9.
-supervertaler-1.9.
+supervertaler-1.9.183.dist-info/licenses/LICENSE,sha256=m28u-4qL5nXIWnJ6xlQVw__H30rWFtRK3pCOais2OuY,1092
+supervertaler-1.9.183.dist-info/METADATA,sha256=MoZSNxW6WqAUNIyvHTQ_2r6vC3Pl6e281sH7_WbePPk,5725
+supervertaler-1.9.183.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+supervertaler-1.9.183.dist-info/entry_points.txt,sha256=NP4hiCvx-_30YYKqgr-jfJYQvHr1qTYBMfoVmKIXSM8,53
+supervertaler-1.9.183.dist-info/top_level.txt,sha256=9tUHBYUSfaE4S2E4W3eavJsDyYymkwLfeWAHHAPT6Dk,22
+supervertaler-1.9.183.dist-info/RECORD,,
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|