supervertaler 1.9.173-py3-none-any.whl → 1.9.190-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
modules/project_tm.py ADDED
@@ -0,0 +1,320 @@
+ """
+ ProjectTM - In-memory TM for instant grid lookups (Total Recall architecture)
+
+ This module implements a lightweight in-memory Translation Memory that extracts
+ relevant segments from the full TM database on project load. This makes grid
+ navigation instant while keeping the full TM for concordance searches.
+
+ Inspired by CafeTran's "Total Recall" feature.
+ """
+
+ import sqlite3
+ import threading
+ from difflib import SequenceMatcher
+ from typing import Dict, List, Optional, Callable
+ import re
+
+
+ class ProjectTM:
+     """
+     Lightweight in-memory TM extracted from the main TM database.
+
+     On project load, extracts segments that are relevant to the current project
+     (fuzzy matches above threshold) into an in-memory SQLite database for
+     instant lookups during grid navigation.
+
+     Usage:
+         project_tm = ProjectTM()
+         project_tm.extract_from_database(
+             db_manager,
+             project_segments,
+             tm_ids=['tm1', 'tm2'],
+             threshold=0.75,
+             progress_callback=lambda cur, total: print(f"{cur}/{total}")
+         )
+
+         # Fast lookup during grid navigation
+         matches = project_tm.search("source text to translate")
+     """
+
+     def __init__(self):
+         """Initialize in-memory SQLite database for ProjectTM"""
+         self.conn = sqlite3.connect(":memory:", check_same_thread=False)
+         self.conn.row_factory = sqlite3.Row
+         self.lock = threading.Lock()
+         self.is_built = False
+         self.segment_count = 0
+
+         # Create the schema
+         self._create_schema()
+
+     def _create_schema(self):
+         """Create the in-memory database schema"""
+         with self.lock:
+             cursor = self.conn.cursor()
+             cursor.execute("""
+                 CREATE TABLE IF NOT EXISTS segments (
+                     id INTEGER PRIMARY KEY AUTOINCREMENT,
+                     source_text TEXT NOT NULL,
+                     target_text TEXT NOT NULL,
+                     source_lower TEXT NOT NULL,
+                     tm_id TEXT,
+                     tm_name TEXT,
+                     similarity REAL,
+                     original_id INTEGER
+                 )
+             """)
+             # Index for fast exact match lookups
+             cursor.execute("CREATE INDEX IF NOT EXISTS idx_source_lower ON segments(source_lower)")
+             # FTS5 for fuzzy text search
+             cursor.execute("""
+                 CREATE VIRTUAL TABLE IF NOT EXISTS segments_fts USING fts5(
+                     source_text,
+                     content=segments,
+                     content_rowid=id
+                 )
+             """)
+             self.conn.commit()
+
+     def clear(self):
+         """Clear all segments from the ProjectTM"""
+         with self.lock:
+             cursor = self.conn.cursor()
+             cursor.execute("DELETE FROM segments")
+             cursor.execute("DELETE FROM segments_fts")
+             self.conn.commit()
+             self.is_built = False
+             self.segment_count = 0
+
+     def extract_from_database(
+         self,
+         db_manager,
+         project_segments: List,
+         tm_ids: List[str] = None,
+         source_lang: str = None,
+         target_lang: str = None,
+         threshold: float = 0.75,
+         progress_callback: Optional[Callable[[int, int], None]] = None,
+         log_callback: Optional[Callable[[str], None]] = None
+     ) -> int:
+         """
+         Extract relevant segments from the main TM database into ProjectTM.
+
+         For each unique source text in the project, searches the TM for fuzzy
+         matches above the threshold and stores them in memory.
+
+         Args:
+             db_manager: The main database manager with TM data
+             project_segments: List of project segments to find matches for
+             tm_ids: List of TM IDs to search (None = all active TMs)
+             source_lang: Source language filter
+             target_lang: Target language filter
+             threshold: Minimum similarity threshold (0.0-1.0)
+             progress_callback: Optional callback(current, total) for progress
+             log_callback: Optional callback(message) for logging
+
+         Returns:
+             Number of TM segments extracted
+         """
+         def log(msg):
+             if log_callback:
+                 log_callback(msg)
+             else:
+                 print(msg)
+
+         self.clear()
+
+         if not project_segments or not db_manager:
+             log(f"[ProjectTM] Early exit: segments={bool(project_segments)}, db={bool(db_manager)}")
+             return 0
+
+         # Get unique source texts from project
+         unique_sources = {}
+         for seg in project_segments:
+             # Try both 'source' and 'source_text' attributes (different segment types use different names)
+             source = getattr(seg, 'source', None) or getattr(seg, 'source_text', None)
+             if source and source.strip():
+                 # Normalize: strip and lowercase for deduplication
+                 key = source.strip().lower()
+                 if key not in unique_sources:
+                     unique_sources[key] = source.strip()
+
+         total = len(unique_sources)
+         log(f"[ProjectTM] Found {total} unique source texts from {len(project_segments)} segments")
+         if total == 0:
+             return 0
+
+         extracted_count = 0
+         seen_sources = set() # Deduplicate TM entries
+
+         cursor = self.conn.cursor()
+
+         log(f"[ProjectTM] Searching TMs: {tm_ids}, threshold={threshold}, langs={source_lang}->{target_lang}")
+
+         for i, (key, source_text) in enumerate(unique_sources.items()):
+             if progress_callback and i % 10 == 0:
+                 progress_callback(i, total)
+
+             try:
+                 # Search main TM database for fuzzy matches
+                 matches = db_manager.search_fuzzy_matches(
+                     source_text,
+                     tm_ids=tm_ids,
+                     threshold=threshold,
+                     max_results=10, # Keep top 10 matches per source
+                     source_lang=source_lang,
+                     target_lang=target_lang,
+                     bidirectional=True
+                 )
+
+                 # Debug: log first search
+                 if i == 0:
+                     log(f"[ProjectTM] First search '{source_text[:50]}...' returned {len(matches)} matches")
+
+                 for match in matches:
+                     match_source = match.get('source_text', '')
+                     match_target = match.get('target_text', '')
+
+                     if not match_source or not match_target:
+                         continue
+
+                     # Deduplicate by source text
+                     source_key = match_source.strip().lower()
+                     if source_key in seen_sources:
+                         continue
+                     seen_sources.add(source_key)
+
+                     # Insert into ProjectTM
+                     cursor.execute("""
+                         INSERT INTO segments (source_text, target_text, source_lower,
+                                               tm_id, tm_name, similarity, original_id)
+                         VALUES (?, ?, ?, ?, ?, ?, ?)
+                     """, (
+                         match_source,
+                         match_target,
+                         source_key,
+                         match.get('tm_id'),
+                         match.get('tm_name', 'Unknown TM'),
+                         match.get('similarity', 0),
+                         match.get('id')
+                     ))
+                     extracted_count += 1
+
+             except Exception as e:
+                 # Log but continue - don't fail extraction for one bad segment
+                 pass
+
+         # Commit all inserts
+         self.conn.commit()
+
+         # Rebuild FTS5 index
+         try:
+             cursor.execute("INSERT INTO segments_fts(segments_fts) VALUES('rebuild')")
+             self.conn.commit()
+         except Exception:
+             pass # FTS rebuild may fail if no data, that's OK
+
+         if progress_callback:
+             progress_callback(total, total)
+
+         self.is_built = True
+         self.segment_count = extracted_count
+
+         return extracted_count
+
+     def search(self, source_text: str, max_results: int = 5) -> List[Dict]:
+         """
+         Search ProjectTM for matches (instant lookup).
+
+         First checks for exact matches, then falls back to fuzzy search.
+
+         Args:
+             source_text: Source text to search for
+             max_results: Maximum number of results to return
+
+         Returns:
+             List of match dictionaries with source_text, target_text, similarity, etc.
+         """
+         if not self.is_built or not source_text:
+             return []
+
+         source_lower = source_text.strip().lower()
+         results = []
+
+         with self.lock:
+             cursor = self.conn.cursor()
+
+             # 1. Check for exact match first (fastest)
+             cursor.execute("""
+                 SELECT * FROM segments WHERE source_lower = ? LIMIT 1
+             """, (source_lower,))
+             exact = cursor.fetchone()
+
+             if exact:
+                 results.append({
+                     'source_text': exact['source_text'],
+                     'target_text': exact['target_text'],
+                     'tm_id': exact['tm_id'],
+                     'tm_name': exact['tm_name'],
+                     'similarity': 1.0, # Exact match
+                     'match_pct': 100,
+                     'id': exact['original_id']
+                 })
+                 return results # Exact match - no need to search further
+
+             # 2. FTS5 fuzzy search
+             try:
+                 # Tokenize query for FTS5
+                 clean_text = re.sub(r'[^\w\s]', ' ', source_text)
+                 search_terms = [t for t in clean_text.split() if len(t) > 2]
+
+                 if search_terms:
+                     fts_query = ' OR '.join(f'"{term}"' for term in search_terms[:10])
+
+                     cursor.execute("""
+                         SELECT s.*, bm25(segments_fts) as rank
+                         FROM segments s
+                         JOIN segments_fts ON s.id = segments_fts.rowid
+                         WHERE segments_fts MATCH ?
+                         ORDER BY rank
+                         LIMIT ?
+                     """, (fts_query, max_results * 3)) # Get more candidates for re-ranking
+
+                     candidates = cursor.fetchall()
+
+                     # Re-rank by actual similarity
+                     for row in candidates:
+                         similarity = self._calculate_similarity(source_text, row['source_text'])
+                         if similarity >= 0.5: # Lower threshold for ProjectTM (pre-filtered)
+                             results.append({
+                                 'source_text': row['source_text'],
+                                 'target_text': row['target_text'],
+                                 'tm_id': row['tm_id'],
+                                 'tm_name': row['tm_name'],
+                                 'similarity': similarity,
+                                 'match_pct': int(similarity * 100),
+                                 'id': row['original_id']
+                             })
+
+                     # Sort by similarity and limit
+                     results.sort(key=lambda x: x['similarity'], reverse=True)
+                     results = results[:max_results]
+
+             except Exception:
+                 pass # FTS search may fail, return what we have
+
+         return results
+
+     def _calculate_similarity(self, text1: str, text2: str) -> float:
+         """Calculate similarity ratio between two texts"""
+         # Strip HTML/XML tags for comparison
+         clean1 = re.sub(r'<[^>]+>', '', text1).lower()
+         clean2 = re.sub(r'<[^>]+>', '', text2).lower()
+         return SequenceMatcher(None, clean1, clean2).ratio()
+
+     def get_stats(self) -> Dict:
+         """Get statistics about the ProjectTM"""
+         return {
+             'is_built': self.is_built,
+             'segment_count': self.segment_count
+         }
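
A rough sketch of how the new module is meant to sit in front of the full database during grid navigation (the handler name and fallback wiring are assumptions; only ProjectTM.search() and db_manager.search_fuzzy_matches() appear in the code above):

    # Illustrative sketch - not part of the released package.
    def lookup_for_grid(project_tm, db_manager, source_text):
        """Hypothetical grid lookup: in-memory ProjectTM first, full TM as fallback."""
        matches = project_tm.search(source_text, max_results=5)
        if matches:
            return matches # instant, no disk access
        # Segment not covered at extraction time: query the full TM database
        return db_manager.search_fuzzy_matches(source_text, threshold=0.75, max_results=5)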
modules/superbrowser.py CHANGED
@@ -160,6 +160,20 @@ class ChatColumn(QWidget):
          """Update URL bar when page changes"""
          self.url_input.setText(url.toString())

+     def cleanup(self):
+         """Clean up web engine resources before deletion"""
+         try:
+             from PyQt6.QtCore import QUrl
+             if hasattr(self, 'web_view'):
+                 self.web_view.stop()
+                 self.web_view.setPage(None)
+                 self.web_view.setUrl(QUrl('about:blank'))
+                 self.web_view.deleteLater()
+             if hasattr(self, 'profile'):
+                 self.profile.deleteLater()
+         except:
+             pass
+

  class SuperbrowserWidget(QWidget):
      """
@@ -304,6 +318,14 @@ class SuperbrowserWidget(QWidget):
          self.claude_column.go_home()
          self.gemini_column.go_home()

+     def cleanup(self):
+         """Clean up all web engine resources before widget deletion"""
+         try:
+             for column in self.chat_columns:
+                 column.cleanup()
+         except:
+             pass
+

  # ============================================================================
  # STANDALONE USAGE
@@ -409,7 +409,111 @@ class TermbaseManager:
          except Exception as e:
              self.log(f"✗ Error setting termbase read_only: {e}")
              return False
-
+
+     def get_termbase_ai_inject(self, termbase_id: int) -> bool:
+         """Get whether termbase terms should be injected into LLM prompts"""
+         try:
+             cursor = self.db_manager.cursor
+             cursor.execute("SELECT ai_inject FROM termbases WHERE id = ?", (termbase_id,))
+             result = cursor.fetchone()
+             return bool(result[0]) if result and result[0] else False
+         except Exception as e:
+             self.log(f"✗ Error getting termbase ai_inject: {e}")
+             return False
+
+     def set_termbase_ai_inject(self, termbase_id: int, ai_inject: bool) -> bool:
+         """Set whether termbase terms should be injected into LLM prompts"""
+         try:
+             cursor = self.db_manager.cursor
+             cursor.execute("""
+                 UPDATE termbases SET ai_inject = ? WHERE id = ?
+             """, (1 if ai_inject else 0, termbase_id))
+             self.db_manager.connection.commit()
+             status = "enabled" if ai_inject else "disabled"
+             self.log(f"✓ AI injection {status} for termbase {termbase_id}")
+             return True
+         except Exception as e:
+             self.log(f"✗ Error setting termbase ai_inject: {e}")
+             return False
+
+     def get_ai_inject_termbases(self, project_id: Optional[int] = None) -> List[Dict]:
+         """
+         Get all termbases with ai_inject enabled that are active for the given project.
+
+         Args:
+             project_id: Project ID (0 or None for global)
+
+         Returns:
+             List of termbase dictionaries with all terms
+         """
+         try:
+             cursor = self.db_manager.cursor
+             proj_id = project_id if project_id else 0
+
+             cursor.execute("""
+                 SELECT t.id, t.name, t.source_lang, t.target_lang
+                 FROM termbases t
+                 LEFT JOIN termbase_activation ta ON t.id = ta.termbase_id AND ta.project_id = ?
+                 WHERE t.ai_inject = 1
+                   AND (ta.is_active = 1 OR (t.is_global = 1 AND ta.is_active IS NULL))
+                 ORDER BY ta.priority ASC, t.name ASC
+             """, (proj_id,))
+
+             termbases = []
+             for row in cursor.fetchall():
+                 termbases.append({
+                     'id': row[0],
+                     'name': row[1],
+                     'source_lang': row[2],
+                     'target_lang': row[3]
+                 })
+             return termbases
+         except Exception as e:
+             self.log(f"✗ Error getting AI inject termbases: {e}")
+             return []
+
+     def get_ai_inject_terms(self, project_id: Optional[int] = None) -> List[Dict]:
+         """
+         Get all terms from AI-inject-enabled termbases for the given project.
+
+         Args:
+             project_id: Project ID (0 or None for global)
+
+         Returns:
+             List of term dictionaries with source_term, target_term, forbidden, termbase_name
+         """
+         try:
+             # First get all AI-inject termbases
+             ai_termbases = self.get_ai_inject_termbases(project_id)
+             if not ai_termbases:
+                 return []
+
+             all_terms = []
+             cursor = self.db_manager.cursor
+
+             for tb in ai_termbases:
+                 cursor.execute("""
+                     SELECT source_term, target_term, forbidden, priority
+                     FROM termbase_terms
+                     WHERE termbase_id = ?
+                     ORDER BY priority ASC, source_term ASC
+                 """, (tb['id'],))
+
+                 for row in cursor.fetchall():
+                     all_terms.append({
+                         'source_term': row[0],
+                         'target_term': row[1],
+                         'forbidden': bool(row[2]) if row[2] else False,
+                         'priority': row[3] or 99,
+                         'termbase_name': tb['name']
+                     })
+
+             self.log(f"📚 Retrieved {len(all_terms)} terms from {len(ai_termbases)} AI-inject glossar{'y' if len(ai_termbases) == 1 else 'ies'}")
+             return all_terms
+         except Exception as e:
+             self.log(f"✗ Error getting AI inject terms: {e}")
+             return []
+
      def set_termbase_priority(self, termbase_id: int, project_id: int, priority: int) -> bool:
          """
          Set manual priority for a termbase in a specific project.
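
A rough sketch of how the term dictionaries returned by get_ai_inject_terms() could be rendered into prompt text (the helper name and output format are assumptions; only the dictionary keys come from the code above):

    # Illustrative sketch - not part of the released package.
    def format_glossary_for_prompt(terms):
        """Hypothetical helper: render AI-inject terms as a glossary section for an LLM prompt."""
        lines = []
        for t in terms:
            if t['forbidden']:
                lines.append(f"- Do NOT use: {t['source_term']} -> {t['target_term']}")
            else:
                lines.append(f"- {t['source_term']} -> {t['target_term']} ({t['termbase_name']})")
        return "Glossary:\n" + "\n".join(lines) if lines else ""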
@@ -505,7 +609,6 @@ class TermbaseManager:
              """, (project_id,))

              active_ids = [row[0] for row in cursor.fetchall()]
-             self.log(f"📋 Found {len(active_ids)} active termbases for project {project_id}: {active_ids}")
              return active_ids
          except Exception as e:
              self.log(f"✗ Error getting active termbase IDs: {e}")
@@ -172,7 +172,7 @@ class TermBlock(QWidget):
          # Get theme colors
          is_dark = self.theme_manager and self.theme_manager.current_theme.name == "Dark"
          separator_color = "#555555" if is_dark else "#CCCCCC"
-         source_text_color = "#E0E0E0" if is_dark else "#333"
+         source_text_color = "#FFFFFF" if is_dark else "#333"
          no_match_color = "#666666" if is_dark else "#ddd"
          no_match_bg = "#2A2A2A" if is_dark else "#F5F5F5"

@@ -224,10 +224,17 @@ class TermBlock(QWidget):
          if self.translations:
              target_text = primary_translation.get('target_term', primary_translation.get('target', ''))
              termbase_name = primary_translation.get('termbase_name', '')
-
-             # Background color based on termbase type
-             bg_color = "#FFE5F0" if self.is_effective_project else "#D6EBFF" # Pink for project, light blue for regular
-             hover_color = "#FFD0E8" if self.is_effective_project else "#BBDEFB" # Slightly darker on hover
+
+             # Background color based on termbase type (theme-aware)
+             is_dark = self.theme_manager and self.theme_manager.current_theme.name == "Dark"
+             if is_dark:
+                 # Dark mode: darker backgrounds
+                 bg_color = "#4A2D3A" if self.is_effective_project else "#2D3E4A" # Dark pink/blue
+                 hover_color = "#5A3D4A" if self.is_effective_project else "#3D4E5A" # Lighter on hover
+             else:
+                 # Light mode: original colors
+                 bg_color = "#FFE5F0" if self.is_effective_project else "#D6EBFF" # Pink for project, light blue for regular
+                 hover_color = "#FFD0E8" if self.is_effective_project else "#BBDEFB" # Slightly darker on hover

              # Create horizontal layout for target + shortcut badge
              # Apply background to container so it covers both text and badge
@@ -251,9 +258,11 @@ class TermBlock(QWidget):
              target_font.setBold(self.font_bold)
              target_label.setFont(target_font)
              target_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
+             # Theme-aware text color
+             target_text_color = "#B0C4DE" if is_dark else "#0052A3" # Light blue in dark mode
              target_label.setStyleSheet(f"""
                  QLabel {{
-                     color: #0052A3;
+                     color: {target_text_color};
                      padding: 0px;
                      background-color: transparent;
                      border: none;
@@ -312,11 +321,12 @@ class TermBlock(QWidget):
          if len(self.translations) > 1:
              count_label = QLabel(f"+{len(self.translations) - 1}")
              count_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
-             count_label.setStyleSheet("""
-                 QLabel {
-                     color: #999;
+             count_color = "#AAA" if is_dark else "#999" # Lighter in dark mode
+             count_label.setStyleSheet(f"""
+                 QLabel {{
+                     color: {count_color};
                      font-size: 7px;
-                 }
+                 }}
              """)
              layout.addWidget(count_label)
              return
@@ -336,10 +346,13 @@ class TermBlock(QWidget):
              badge_label = QLabel(badge_text)
              badge_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
              badge_label.setFixedSize(badge_width, 14)
+             # Theme-aware badge colors
+             badge_bg = "#4A90E2" if is_dark else "#1976D2" # Lighter blue in dark mode
+             badge_text_color = "#FFFFFF" if is_dark else "white"
              badge_label.setStyleSheet(f"""
                  QLabel {{
-                     background-color: #1976D2;
-                     color: white;
+                     background-color: {badge_bg};
+                     color: {badge_text_color};
                      font-size: 9px;
                      font-weight: bold;
                      border-radius: 7px;
@@ -352,16 +365,17 @@ class TermBlock(QWidget):
              target_layout.addWidget(badge_label)

          layout.addWidget(target_container)
-
+
          # Show count if multiple translations - very compact
          if len(self.translations) > 1:
              count_label = QLabel(f"+{len(self.translations) - 1}")
              count_label.setAlignment(Qt.AlignmentFlag.AlignCenter)
-             count_label.setStyleSheet("""
-                 QLabel {
-                     color: #999;
+             count_color = "#AAA" if is_dark else "#999" # Lighter in dark mode
+             count_label.setStyleSheet(f"""
+                 QLabel {{
+                     color: {count_color};
                      font-size: 7px;
-                 }
+                 }}
              """)
              layout.addWidget(count_label)
          else:
@@ -439,7 +453,7 @@ class NTBlock(QWidget):

          # Get theme colors
          is_dark = self.theme_manager and self.theme_manager.current_theme.name == "Dark"
-         source_text_color = "#E0E0E0" if is_dark else "#5D4E37"
+         source_text_color = "#FFFFFF" if is_dark else "#5D4E37"

          # Pastel yellow border for non-translatables
          border_color = "#E6C200" # Darker yellow for border
@@ -637,6 +651,17 @@ class TermviewWidget(QWidget):
          is_dark = theme.name == "Dark"
          info_label_color = "#909090" if is_dark else info_text
          self.info_label.setStyleSheet(f"color: {info_label_color}; font-size: 10px; padding: 5px;")
+
+         # Refresh term blocks to pick up new theme colors
+         if hasattr(self, '_last_termbase_matches') and hasattr(self, '_last_nt_matches') and hasattr(self, 'current_source'):
+             # Re-render with stored matches to apply new theme colors
+             if self.current_source:
+                 self.update_with_matches(
+                     self.current_source,
+                     self._last_termbase_matches or [],
+                     self._last_nt_matches,
+                     self._status_hint if hasattr(self, '_status_hint') else None
+                 )

      def set_font_settings(self, font_family: str = "Segoe UI", font_size: int = 10, bold: bool = False):
          """Update font settings for Termview
@@ -694,27 +719,31 @@ class TermviewWidget(QWidget):
              font.setBold(self.current_font_bold)
              block.source_label.setFont(font)

-     def update_with_matches(self, source_text: str, termbase_matches: List[Dict], nt_matches: List[Dict] = None):
+     def update_with_matches(self, source_text: str, termbase_matches: List[Dict], nt_matches: List[Dict] = None, status_hint: str = None):
          """
          Update the termview display with pre-computed termbase and NT matches
-
+
          RYS-STYLE DISPLAY: Show source text as tokens with translations underneath
-
+
          Args:
              source_text: Source segment text
              termbase_matches: List of termbase match dicts from Translation Results
              nt_matches: Optional list of NT match dicts with 'text', 'start', 'end', 'list_name' keys
+             status_hint: Optional hint about why there might be no matches (e.g., 'no_termbases_activated', 'wrong_language')
          """
          self.current_source = source_text
-
+         # Store matches for theme refresh
+         self._last_termbase_matches = termbase_matches
+         self._last_nt_matches = nt_matches
+
          # Clear existing blocks and shortcut mappings
          self.clear_terms()
          self.shortcut_terms = {} # Reset shortcut mappings
-
+
          if not source_text or not source_text.strip():
              self.info_label.setText("No segment selected")
              return
-
+
          # Strip HTML/XML tags from source text for display in TermView
          # This handles CAT tool tags like <b>, </b>, <i>, </i>, <u>, </u>, <bi>, <sub>, <sup>, <li-o>, <li-b>
          # as well as memoQ tags {1}, [2}, {3], Trados tags <1>, </1>, and Déjà Vu tags {00001}
@@ -725,17 +754,17 @@ class TermviewWidget(QWidget):
          display_text = re.sub(r'\[[^\[\]]*\}', '', display_text) # Opening: [anything}
          display_text = re.sub(r'\{[^\{\}]*\]', '', display_text) # Closing: {anything]
          display_text = display_text.strip()
-
+
          # If stripping tags leaves nothing, fall back to original
          if not display_text:
              display_text = source_text
-
+
          has_termbase = termbase_matches and len(termbase_matches) > 0
          has_nt = nt_matches and len(nt_matches) > 0
-
-         if not has_termbase and not has_nt:
-             self.info_label.setText("No terminology or NT matches for this segment")
-             return
+
+         # Store status hint for info label (will be set at the end)
+         self._status_hint = status_hint
+         self._has_any_matches = has_termbase or has_nt

          # Convert termbase matches to dict for easy lookup: {source_term.lower(): [translations]}
          matches_dict = {}
@@ -867,11 +896,18 @@ class TermviewWidget(QWidget):
              info_parts.append(f"{blocks_with_translations} terms")
          if blocks_with_nt > 0:
              info_parts.append(f"{blocks_with_nt} NTs")
-
+
          if info_parts:
              self.info_label.setText(f"✓ Found {', '.join(info_parts)} in {len(tokens)} words")
          else:
-             self.info_label.setText(f"No matches in {len(tokens)} words")
+             # Show appropriate message based on status hint when no matches
+             status_hint = getattr(self, '_status_hint', None)
+             if status_hint == 'no_termbases_activated':
+                 self.info_label.setText(f"No glossaries activated ({len(tokens)} words)")
+             elif status_hint == 'wrong_language':
+                 self.info_label.setText(f"Glossaries don't match language pair ({len(tokens)} words)")
+             else:
+                 self.info_label.setText(f"No matches in {len(tokens)} words")

      def get_all_termbase_matches(self, text: str) -> Dict[str, List[Dict]]:
          """