supervertaler 1.9.116__py3-none-any.whl → 1.9.172__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -29,15 +29,15 @@ class AttachmentManager:
  Initialize the AttachmentManager.
 
  Args:
- base_dir: Base directory for attachments (default: user_data_private/AI_Assistant)
+ base_dir: Base directory for attachments (default: user_data_private/ai_assistant)
 log_callback: Function to call for logging messages
  """
  self.log = log_callback if log_callback else print
 
  # Set base directory
  if base_dir is None:
- # Default to user_data_private/AI_Assistant
- base_dir = Path("user_data_private") / "AI_Assistant"
+ # Default to user_data_private/ai_assistant
+ base_dir = Path("user_data_private") / "ai_assistant"
 
  self.base_dir = Path(base_dir)
  self.attachments_dir = self.base_dir / "attachments"
modules/config_manager.py CHANGED
@@ -35,14 +35,14 @@ class ConfigManager:
  REQUIRED_FOLDERS = [
  # Note: Old numbered folders (1_System_Prompts, 2_Domain_Prompts, etc.) are deprecated
  # Migration moves them to unified Library structure
- "Prompt_Library/Domain Expertise",
- "Prompt_Library/Project Prompts",
- "Prompt_Library/Style Guides",
- "Translation_Resources/Termbases",
- "Translation_Resources/TMs",
- "Translation_Resources/Non-translatables",
- "Translation_Resources/Segmentation_rules",
- "Projects",
+ "prompt_library/domain_expertise",
+ "prompt_library/project_prompts",
+ "prompt_library/style_guides",
+ "resources/termbases",
+ "resources/tms",
+ "resources/non_translatables",
+ "resources/segmentation_rules",
+ "projects",
  ]
 
  def __init__(self):
@@ -268,8 +268,8 @@ class ConfigManager:
  Get the full path to a subfolder in user_data.
 
  Example:
- config.get_subfolder_path('Translation_Resources/TMs')
- -> '/home/user/Supervertaler_Data/Translation_Resources/TMs'
+ config.get_subfolder_path('resources/tms')
+ -> '/home/user/Supervertaler/resources/tms'
  """
  user_data_path = self.get_user_data_path()
  full_path = os.path.join(user_data_path, subfolder)
@@ -840,11 +840,15 @@ class DatabaseManager:
  bidirectional: If True, search both directions (nl→en AND en→nl)
 
  Returns: List of matches with similarity scores
+
+ Note: When multiple TMs are provided, searches each TM separately to ensure
+ good matches from smaller TMs aren't pushed out by BM25 keyword ranking
+ from larger TMs. Results are merged and sorted by actual similarity.
  """
  # For better FTS5 matching, tokenize the query and escape special chars
  # FTS5 special characters: " ( ) - : , . ! ?
  import re
- from modules.tmx_generator import get_base_lang_code
+ from modules.tmx_generator import get_base_lang_code, get_lang_match_variants
 
  # Strip HTML/XML tags from source for clean text search
  text_without_tags = re.sub(r'<[^>]+>', '', source)
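The Note above is implemented in the hunks that follow (per-TM search, dedupe, similarity sort). As a minimal standalone sketch of just the merge step, assuming each match dict carries the 'source_text' and 'similarity' keys used in this diff:

from typing import Dict, List

def merge_tm_results(per_tm_results: List[List[Dict]], max_results: int) -> List[Dict]:
    """Sketch of the merge described in the Note: keep the best entry per unique
    source_text, then rank by real similarity so a strong match from a small TM
    is not buried under BM25-favoured hits from a large TM."""
    seen: Dict[str, Dict] = {}
    for tm_results in per_tm_results:
        for match in tm_results:
            key = match['source_text']
            if key not in seen or match['similarity'] > seen[key]['similarity']:
                seen[key] = match
    merged = sorted(seen.values(), key=lambda m: m['similarity'], reverse=True)
    return merged[:max_results]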
@@ -868,22 +872,57 @@ class DatabaseManager:
  # This helps find similar long segments more reliably
  search_terms_for_query = all_search_terms[:20]
 
- print(f"[DEBUG] search_fuzzy_matches: source='{source[:50]}...', {len(all_search_terms)} terms")
-
  if not search_terms_for_query:
  # If no valid terms, return empty results
- print(f"[DEBUG] search_fuzzy_matches: No valid search terms, returning empty")
  return []
 
  # Quote each term to prevent FTS5 syntax errors
  fts_query = ' OR '.join(f'"{term}"' for term in search_terms_for_query)
- print(f"[DEBUG] search_fuzzy_matches: FTS query terms = {search_terms_for_query[:10]}...")
 
  # Get base language codes for comparison
  src_base = get_base_lang_code(source_lang) if source_lang else None
  tgt_base = get_base_lang_code(target_lang) if target_lang else None
 
- # Use FTS5 for initial candidate retrieval (fast)
+ # MULTI-TM FIX: Search each TM separately to avoid BM25 ranking issues
+ # When a large TM is combined with a small TM, the large TM's many keyword matches
+ # push down genuinely similar sentences from the small TM
+ tms_to_search = tm_ids if tm_ids else [None] # None means search all TMs together
+
+ all_results = []
+
+ for tm_id in tms_to_search:
+ # Search this specific TM (or all if tm_id is None)
+ tm_results = self._search_single_tm_fuzzy(
+ source, fts_query, [tm_id] if tm_id else None,
+ threshold, max_results, src_base, tgt_base,
+ source_lang, target_lang, bidirectional
+ )
+ all_results.extend(tm_results)
+
+ # Deduplicate by source_text (keep highest similarity for each unique source)
+ seen = {}
+ for result in all_results:
+ key = result['source_text']
+ if key not in seen or result['similarity'] > seen[key]['similarity']:
+ seen[key] = result
+
+ deduped_results = list(seen.values())
+
+ # Sort ALL results by similarity (highest first) - this ensures the 76% match
+ # appears before 40% matches regardless of which TM they came from
+ deduped_results.sort(key=lambda x: x['similarity'], reverse=True)
+
+ return deduped_results[:max_results]
+
+ def _search_single_tm_fuzzy(self, source: str, fts_query: str, tm_ids: List[str],
+ threshold: float, max_results: int,
+ src_base: str, tgt_base: str,
+ source_lang: str, target_lang: str,
+ bidirectional: bool) -> List[Dict]:
+ """Search a single TM (or all TMs if tm_ids is None) for fuzzy matches"""
+ from modules.tmx_generator import get_lang_match_variants
+
+ # Build query for this TM
  query = """
  SELECT tu.*,
  bm25(translation_units_fts) as relevance
@@ -893,13 +932,12 @@ class DatabaseManager:
  """
  params = [fts_query]
 
- if tm_ids:
+ if tm_ids and tm_ids[0] is not None:
  placeholders = ','.join('?' * len(tm_ids))
  query += f" AND tu.tm_id IN ({placeholders})"
  params.extend(tm_ids)
 
  # Use flexible language matching (matches 'nl', 'nl-NL', 'Dutch', etc.)
- from modules.tmx_generator import get_lang_match_variants
  if src_base:
  src_variants = get_lang_match_variants(source_lang)
  src_conditions = []
@@ -920,19 +958,16 @@ class DatabaseManager:
  params.append(f"{variant}-%")
  query += f" AND ({' OR '.join(tgt_conditions)})"
 
- # Get more candidates than needed for proper scoring (increase limit for long segments)
- # Long segments need MANY more candidates because BM25 ranking may push down
- # the truly similar entries in favor of entries matching more search terms
+ # Per-TM candidate limit - INCREASED to catch more potential fuzzy matches
+ # When multiple TMs are searched, BM25 ranking can push genuinely similar
+ # entries far down the list due to common word matches in other entries
  candidate_limit = max(500, max_results * 50)
  query += f" ORDER BY relevance DESC LIMIT {candidate_limit}"
 
- print(f"[DEBUG] search_fuzzy_matches: Executing query (limit={candidate_limit})...")
-
  try:
  self.cursor.execute(query, params)
  all_rows = self.cursor.fetchall()
  except Exception as e:
- print(f"[DEBUG] search_fuzzy_matches: SQL ERROR: {e}")
  return []
 
  results = []
@@ -948,8 +983,6 @@ class DatabaseManager:
  match_dict['match_pct'] = int(similarity * 100)
  results.append(match_dict)
 
- print(f"[DEBUG] search_fuzzy_matches: After threshold filter ({threshold}): {len(results)} matches")
-
  # If bidirectional, also search reverse direction
  if bidirectional and src_base and tgt_base:
  query = """
@@ -961,13 +994,12 @@ class DatabaseManager:
  """
  params = [fts_query]
 
- if tm_ids:
+ if tm_ids and tm_ids[0] is not None:
  placeholders = ','.join('?' * len(tm_ids))
  query += f" AND tu.tm_id IN ({placeholders})"
  params.extend(tm_ids)
 
  # Reversed language filters with flexible matching
- # For reverse: TM target_lang should match our source_lang, TM source_lang should match our target_lang
  src_variants = get_lang_match_variants(source_lang)
  tgt_variants = get_lang_match_variants(target_lang)
 
@@ -991,26 +1023,27 @@ class DatabaseManager:
 
  query += f" ORDER BY relevance DESC LIMIT {max_results * 5}"
 
- self.cursor.execute(query, params)
-
- for row in self.cursor.fetchall():
- match_dict = dict(row)
- # Calculate similarity against target_text (since we're reversing)
- similarity = self.calculate_similarity(source, match_dict['target_text'])
+ try:
+ self.cursor.execute(query, params)
 
- # Only include matches above threshold
- if similarity >= threshold:
- # Swap source/target for reverse match
- match_dict['source_text'], match_dict['target_text'] = match_dict['target_text'], match_dict['source_text']
- match_dict['source_lang'], match_dict['target_lang'] = match_dict['target_lang'], match_dict['source_lang']
- match_dict['similarity'] = similarity
- match_dict['match_pct'] = int(similarity * 100)
- match_dict['reverse_match'] = True
- results.append(match_dict)
-
- # Sort by similarity (highest first) and limit results
- results.sort(key=lambda x: x['similarity'], reverse=True)
- return results[:max_results]
+ for row in self.cursor.fetchall():
+ match_dict = dict(row)
+ # Calculate similarity against target_text (since we're reversing)
+ similarity = self.calculate_similarity(source, match_dict['target_text'])
+
+ # Only include matches above threshold
+ if similarity >= threshold:
+ # Swap source/target for reverse match
+ match_dict['source_text'], match_dict['target_text'] = match_dict['target_text'], match_dict['source_text']
+ match_dict['source_lang'], match_dict['target_lang'] = match_dict['target_lang'], match_dict['source_lang']
+ match_dict['similarity'] = similarity
+ match_dict['match_pct'] = int(similarity * 100)
+ match_dict['reverse_match'] = True
+ results.append(match_dict)
+ except Exception as e:
+ print(f"[DEBUG] _search_single_tm_fuzzy (reverse): SQL ERROR: {e}")
+
+ return results
 
  def search_all(self, source: str, tm_ids: List[str] = None, enabled_only: bool = True,
  threshold: float = 0.75, max_results: int = 10) -> List[Dict]:
@@ -1124,6 +1157,12 @@ class DatabaseManager:
  Uses FTS5 full-text search for fast matching on millions of segments.
  Falls back to LIKE queries if FTS5 fails.
 
+ Language filters define what you're searching FOR and what translation you want:
+ - "From: Dutch, To: English" = Search for Dutch text, show English translations
+ - Searches ALL TMs (regardless of their stored language pair direction)
+ - Automatically swaps columns when needed (e.g., finds Dutch in target column of EN→NL TM)
+ - This is MORE intuitive than traditional CAT tools that only search specific TM directions
+
  Args:
  query: Text to search for
  tm_ids: List of TM IDs to search (None = all)
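The hunks below implement this as inline checks; as a compact sketch of the accept/swap decision for a single TM row (assuming language codes are already normalised to the same variants as the filter lists):

from typing import List, Optional, Tuple

def classify_row(row_src: str, row_tgt: str,
                 source_langs: Optional[List[str]],
                 target_langs: Optional[List[str]]) -> Tuple[bool, bool]:
    """Return (matches, needs_swap). With "From: Dutch, To: English", an nl→en
    row is accepted as-is and an en→nl row is accepted with its columns swapped."""
    if source_langs and target_langs:
        if row_src in source_langs and row_tgt in target_langs:
            return True, False
        if row_src in target_langs and row_tgt in source_langs:
            return True, True
    elif source_langs:
        if row_src in source_langs:
            return True, False
        if row_tgt in source_langs:
            return True, True
    elif target_langs:
        if row_tgt in target_langs:
            return True, False
        if row_src in target_langs:
            return True, True
    return False, False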
@@ -1141,6 +1180,12 @@ class DatabaseManager:
  # Wrap in quotes for phrase search
  fts_query = f'"{fts_query}"'
 
+ # When language filters specified, we need to search intelligently:
+ # - Don't filter by TM language pair (search ALL TMs)
+ # - Search in BOTH columns to find text
+ # - Swap columns if needed to show correct language order
+ use_smart_search = (source_langs or target_langs)
+
  try:
  # Use FTS5 for fast full-text search
  if direction == 'source':
@@ -1171,20 +1216,105 @@ class DatabaseManager:
  fts_sql += f" AND tu.tm_id IN ({placeholders})"
  params.extend(tm_ids)
 
- # Add language filters (support for list of variants)
- if source_langs:
- placeholders = ','.join('?' * len(source_langs))
- fts_sql += f" AND tu.source_lang IN ({placeholders})"
- params.extend(source_langs)
- if target_langs:
- placeholders = ','.join('?' * len(target_langs))
- fts_sql += f" AND tu.target_lang IN ({placeholders})"
- params.extend(target_langs)
+ # DON'T filter by language when smart search active
+ # (we need to search all TMs and figure out which column has our language)
+ if not use_smart_search:
+ # Traditional filtering when no language filters
+ if source_langs:
+ placeholders = ','.join('?' * len(source_langs))
+ fts_sql += f" AND tu.source_lang IN ({placeholders})"
+ params.extend(source_langs)
+ if target_langs:
+ placeholders = ','.join('?' * len(target_langs))
+ fts_sql += f" AND tu.target_lang IN ({placeholders})"
+ params.extend(target_langs)
 
  fts_sql += " ORDER BY tu.modified_date DESC LIMIT 100"
 
  self.cursor.execute(fts_sql, params)
- return [dict(row) for row in self.cursor.fetchall()]
+ raw_results = [dict(row) for row in self.cursor.fetchall()]
+
+ # Smart search: Filter and swap based on language metadata
+ if use_smart_search:
+ processed_results = []
+ for row in raw_results:
+ row_src_lang = row.get('source_lang', '')
+ row_tgt_lang = row.get('target_lang', '')
+
+ # Check if this row matches our language requirements
+ # If "From: Dutch, To: English":
+ # - Accept if source=nl and target=en (normal)
+ # - Accept if source=en and target=nl (swap needed)
+
+ matches = False
+ needs_swap = False
+
+ if source_langs and target_langs:
+ # Both filters specified
+ if row_src_lang in source_langs and row_tgt_lang in target_langs:
+ # Perfect match - no swap
+ matches = True
+ needs_swap = False
+ elif row_src_lang in target_langs and row_tgt_lang in source_langs:
+ # Reversed - needs swap
+ matches = True
+ needs_swap = True
+ elif source_langs:
+ # Only "From" specified - just check if Dutch is in EITHER column
+ if row_src_lang in source_langs:
+ matches = True
+ needs_swap = False
+ elif row_tgt_lang in source_langs:
+ matches = True
+ needs_swap = True
+ elif target_langs:
+ # Only "To" specified - just check if English is in EITHER column
+ if row_tgt_lang in target_langs:
+ matches = True
+ needs_swap = False
+ elif row_src_lang in target_langs:
+ matches = True
+ needs_swap = True
+
+ if matches:
+ # CRITICAL CHECK: Verify the search text is actually in the correct column
+ # If user searches for Dutch with "From: Dutch", the text must be in the source column (after any swap)
+ # This prevents finding Dutch text when user asks to search FOR English
+
+ if needs_swap:
+ # After swap, check if query is in the NEW source column (was target)
+ text_to_check = row['target_text'].lower()
+ else:
+ # No swap, check if query is in source column
+ text_to_check = row['source_text'].lower()
+
+ # Only include if query text is actually in the source column
+ if query.lower() in text_to_check:
+ if needs_swap:
+ # Swap columns to show correct language order
+ swapped_row = row.copy()
+ swapped_row['source'] = row['target_text']
+ swapped_row['target'] = row['source_text']
+ swapped_row['source_lang'] = row['target_lang']
+ swapped_row['target_lang'] = row['source_lang']
+ processed_results.append(swapped_row)
+ else:
+ # No swap needed - just rename columns
+ processed_row = row.copy()
+ processed_row['source'] = row['source_text']
+ processed_row['target'] = row['target_text']
+ processed_results.append(processed_row)
+
+ return processed_results
+ else:
+ # No language filters - just rename columns
+ processed_results = []
+ for row in raw_results:
+ processed_row = row.copy()
+ processed_row['source'] = row['source_text']
+ processed_row['target'] = row['target_text']
+ processed_results.append(processed_row)
+ return processed_results
 
  except Exception as e:
  # Fallback to LIKE query if FTS5 fails (e.g., index not built)
@@ -1312,6 +1442,10 @@ class DatabaseManager:
  # Note: termbase_id is stored as TEXT in termbase_terms but INTEGER in termbases
  # Use CAST to ensure proper comparison
  # IMPORTANT: Join with termbase_activation to get the ACTUAL priority for this project
+ # CRITICAL FIX: Also match when search_term starts with the glossary term
+ # This handles cases like searching for "ca." when glossary has "ca."
+ # AND searching for "ca" when glossary has "ca."
+ # We also strip trailing punctuation from glossary terms for comparison
  query = """
  SELECT
  t.id, t.source_term, t.target_term, t.termbase_id, t.priority,
@@ -1329,19 +1463,30 @@ class DatabaseManager:
  LOWER(t.source_term) = LOWER(?) OR
  LOWER(t.source_term) LIKE LOWER(?) OR
  LOWER(t.source_term) LIKE LOWER(?) OR
- LOWER(t.source_term) LIKE LOWER(?)
+ LOWER(t.source_term) LIKE LOWER(?) OR
+ LOWER(RTRIM(t.source_term, '.!?,;:')) = LOWER(?) OR
+ LOWER(?) LIKE LOWER(t.source_term) || '%' OR
+ LOWER(?) = LOWER(RTRIM(t.source_term, '.!?,;:'))
  )
  AND (ta.is_active = 1 OR tb.is_project_termbase = 1)
  """
- # Exact match, word at start, word at end, word in middle
- # Use LOWER() for case-insensitive matching (handles "Edelmetalen" = "edelmetalen")
- # IMPORTANT: project_id must be first param for the LEFT JOIN ta.project_id = ? above
+ # Matching patterns:
+ # 1. Exact match: source_term = search_term
+ # 2. Glossary term starts with search: source_term LIKE "search_term %"
+ # 3. Glossary term ends with search: source_term LIKE "% search_term"
+ # 4. Glossary term contains search: source_term LIKE "% search_term %"
+ # 5. Glossary term (stripped) = search_term: RTRIM(source_term) = search_term (handles "ca." = "ca")
+ # 6. Search starts with glossary term: search_term LIKE source_term || '%'
+ # 7. Search = glossary term stripped: search_term = RTRIM(source_term)
  params = [
  project_id if project_id else 0, # Use 0 if no project (won't match any activation records)
  search_term,
  f"{search_term} %",
  f"% {search_term}",
- f"% {search_term} %"
+ f"% {search_term} %",
+ search_term, # For RTRIM comparison
+ search_term, # For reverse LIKE
+ search_term # For reverse RTRIM comparison
  ]
 
  # Language filters - if term has no language, use termbase language for filtering
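Patterns 5 to 7 are what make punctuation-bearing abbreviations match; a tiny Python sketch of the same comparisons (illustration only, not the application code):

PUNCT = '.!?,;:'

def term_matches(source_term: str, search_term: str) -> bool:
    """Mirror the added SQL conditions: glossary 'ca.' matches a search for 'ca',
    and a search for 'ca.' matches a glossary entry 'ca'."""
    term = source_term.lower()
    query = search_term.lower()
    return (
        term.rstrip(PUNCT) == query   # patterns 5/7: stripped glossary term equals the search
        or query.startswith(term)     # pattern 6: the search starts with the glossary term
    )

# term_matches("ca.", "ca") -> True; term_matches("ca", "ca.") -> True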
@@ -301,6 +301,10 @@ class KeyboardShortcutsWidget(QWidget):
 
  def load_shortcuts(self):
  """Load shortcuts into the table"""
+ # CRITICAL: Disable sorting during table modifications to prevent
+ # items from becoming disassociated from their rows (causes vanishing text bug)
+ self.table.setSortingEnabled(False)
+
  self.table.setRowCount(0)
 
  all_shortcuts = self.manager.get_all_shortcuts()
@@ -362,6 +366,9 @@ class KeyboardShortcutsWidget(QWidget):
  self.table.setItem(row, 4, status_item)
 
  row += 1
+
+ # Re-enable sorting after all modifications are complete
+ self.table.setSortingEnabled(True)
 
  def _on_enabled_changed(self, state):
  """Handle checkbox state change for enabling/disabling shortcuts"""
@@ -172,7 +172,7 @@ class NonTranslatablesManager:
  Initialize manager.
 
  Args:
- base_path: Base path for NT files (typically user_data/Translation_Resources/Non-translatables)
+ base_path: Base path for NT files (typically user_data/resources/non_translatables)
 log_callback: Optional logging function
  """
  self.base_path = Path(base_path)
@@ -29,7 +29,7 @@ class PromptLibraryMigration:
  def __init__(self, prompt_library_dir: str, log_callback=None):
  """
  Args:
- prompt_library_dir: Path to user_data/Prompt_Library
+ prompt_library_dir: Path to user_data/prompt_library
 log_callback: Function for logging
  """
  self.prompt_library_dir = Path(prompt_library_dir)
modules/setup_wizard.py CHANGED
@@ -80,17 +80,17 @@ class SetupWizard:
  "Supervertaler will create the following structure:\n\n"
  f"{self.selected_path}\n"
  f" ├── api_keys.txt\n"
- f" ├── Prompt_Library/\n"
+ f" ├── prompt_library/\n"
  f" │ ├── 1_System_Prompts/\n"
  f" │ ├── 2_Domain_Prompts/\n"
  f" │ ├── 3_Project_Prompts/\n"
  f" │ └── 4_Style_Guides/\n"
- f" ├── Translation_Resources/\n"
+ f" ├── resources/\n"
  f" │ ├── TMs/\n"
  f" │ ├── Glossaries/\n"
- f" │ ├── Non-translatables/\n"
- f" │ └── Segmentation_rules/\n"
- f" └── Projects/\n\n"
+ f" │ ├── non_translatables/\n"
+ f" │ └── segmentation_rules/\n"
+ f" └── projects/\n\n"
  "Is this correct?"
  )
 
@@ -140,9 +140,9 @@ class SetupWizard:
  f"Your data folder: {self.selected_path}\n\n"
  f"Created:\n"
  f" • api_keys.txt (add your API keys here)\n"
- f" • Prompt_Library/ (your prompts)\n"
- f" • Translation_Resources/ (TMs, glossaries)\n"
- f" • Projects/ (your work)\n\n"
+ f" • prompt_library/ (your prompts)\n"
+ f" • resources/ (TMs, glossaries)\n"
+ f" • projects/ (your work)\n\n"
  f"All your translation memories, prompts, and projects\n"
  f"will be stored in this location."
  )
@@ -18,9 +18,15 @@ class ShortcutManager:
  "file_new": {
  "category": "File",
  "description": "New Project",
- "default": "Ctrl+N",
+ "default": "",
  "action": "new_project"
  },
+ "editor_focus_notes": {
+ "category": "Edit",
+ "description": "Focus Segment Note Tab (Ctrl+N)",
+ "default": "Ctrl+N",
+ "action": "focus_segment_notes"
+ },
  "file_open": {
  "category": "File",
  "description": "Open Project",
@@ -542,6 +548,28 @@ class ShortcutManager:
  "action": "copy_source_to_target",
  "context": "grid_editor"
  },
+ "editor_add_to_dictionary": {
+ "category": "Editor",
+ "description": "Add word at cursor to custom dictionary",
+ "default": "Alt+D",
+ "action": "add_word_to_dictionary",
+ "context": "grid_editor"
+ },
+ "editor_open_quickmenu": {
+ "category": "Editor",
+ "description": "Open QuickMenu for AI prompt actions",
+ "default": "Alt+K",
+ "action": "open_quickmenu",
+ "context": "grid_editor"
+ },
+ "editor_show_context_menu_double_shift": {
+ "category": "Editor",
+ "description": "Show context menu (double-tap Shift)",
+ "default": "", # Requires AutoHotkey script: supervertaler_hotkeys.ahk
+ "action": "show_context_menu_double_shift",
+ "context": "grid_editor",
+ "note": "Requires AutoHotkey. Run supervertaler_hotkeys.ahk for this feature."
+ },
 
  # Filter Operations
  "filter_selected_text": {
modules/superbrowser.py CHANGED
@@ -45,11 +45,12 @@ def _clear_corrupted_cache(storage_path: str):
  class ChatColumn(QWidget):
  """A column containing a chat interface with web browser"""
 
- def __init__(self, title, url, header_color, parent=None):
+ def __init__(self, title, url, header_color, parent=None, user_data_path=None):
  super().__init__(parent)
  self.title = title
  self.url = url
  self.header_color = header_color
+ self.user_data_path = user_data_path # Store user data path
  self.init_ui()
 
  def init_ui(self):
@@ -102,12 +103,14 @@ class ChatColumn(QWidget):
  profile_name = f"superbrowser_{self.title.lower()}"
  self.profile = QWebEngineProfile(profile_name, self)
 
- # Set persistent storage path (use same pattern as main app)
- # Check if running in dev mode (private data folder)
- dev_mode_marker = os.path.join(os.path.dirname(__file__), "..", ".supervertaler.local")
- base_folder = "user_data_private" if os.path.exists(dev_mode_marker) else "user_data"
-
- storage_path = os.path.join(os.path.dirname(__file__), "..", base_folder, "superbrowser_profiles", profile_name)
+ # Set persistent storage path using user_data_path from parent
+ if self.user_data_path:
+ storage_path = os.path.join(str(self.user_data_path), "superbrowser_profiles", profile_name)
+ else:
+ # Fallback to script directory if user_data_path not provided
+ dev_mode_marker = os.path.join(os.path.dirname(__file__), "..", ".supervertaler.local")
+ base_folder = "user_data_private" if os.path.exists(dev_mode_marker) else "user_data"
+ storage_path = os.path.join(os.path.dirname(__file__), "..", base_folder, "superbrowser_profiles", profile_name)
  os.makedirs(storage_path, exist_ok=True)
 
  # Clear potentially corrupted cache to prevent Chromium errors
@@ -166,9 +169,10 @@ class SuperbrowserWidget(QWidget):
  and concurrent interaction with different AI models.
  """
 
- def __init__(self, parent=None):
+ def __init__(self, parent=None, user_data_path=None):
  super().__init__(parent)
  self.parent_window = parent
+ self.user_data_path = user_data_path # Store user data path for profiles
 
  # Default URLs for AI chat interfaces
  self.chatgpt_url = "https://chatgpt.com/"
@@ -257,10 +261,10 @@ class SuperbrowserWidget(QWidget):
  splitter = QSplitter(Qt.Orientation.Horizontal)
  splitter.setHandleWidth(3)
 
- # Create chat columns
- self.chatgpt_column = ChatColumn("ChatGPT", self.chatgpt_url, "#10a37f", self)
- self.claude_column = ChatColumn("Claude", self.claude_url, "#c17c4f", self)
- self.gemini_column = ChatColumn("Gemini", self.gemini_url, "#4285f4", self)
+ # Create chat columns - pass user_data_path for profile storage
+ self.chatgpt_column = ChatColumn("ChatGPT", self.chatgpt_url, "#10a37f", self, user_data_path=self.user_data_path)
+ self.claude_column = ChatColumn("Claude", self.claude_url, "#c17c4f", self, user_data_path=self.user_data_path)
+ self.gemini_column = ChatColumn("Gemini", self.gemini_url, "#4285f4", self, user_data_path=self.user_data_path)
 
  # Add columns to splitter
  splitter.addWidget(self.chatgpt_column)
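The diff shows the new user_data_path parameter being threaded from SuperbrowserWidget into each ChatColumn; how a caller supplies it is not included here. A hedged usage sketch (the create_superbrowser helper and its caller are assumptions):

from modules.superbrowser import SuperbrowserWidget

def create_superbrowser(main_window, user_data_path: str) -> SuperbrowserWidget:
    # Forward the resolved user data folder so the three browser profiles persist
    # under <user_data>/superbrowser_profiles/ instead of a path derived from the
    # module's own location.
    return SuperbrowserWidget(parent=main_window, user_data_path=user_data_path)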