supervertaler-1.9.190-py3-none-any.whl → supervertaler-1.9.198-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- Supervertaler.py +51253 -50498
- modules/keyboard_shortcuts_widget.py +76 -8
- modules/llm_clients.py +58 -33
- modules/quicktrans.py +670 -0
- modules/shortcut_manager.py +19 -5
- modules/statuses.py +2 -2
- modules/superlookup.py +3 -3
- modules/unified_prompt_manager_qt.py +22 -1
- {supervertaler-1.9.190.dist-info → supervertaler-1.9.198.dist-info}/METADATA +1 -1
- {supervertaler-1.9.190.dist-info → supervertaler-1.9.198.dist-info}/RECORD +14 -13
- {supervertaler-1.9.190.dist-info → supervertaler-1.9.198.dist-info}/WHEEL +0 -0
- {supervertaler-1.9.190.dist-info → supervertaler-1.9.198.dist-info}/entry_points.txt +0 -0
- {supervertaler-1.9.190.dist-info → supervertaler-1.9.198.dist-info}/licenses/LICENSE +0 -0
- {supervertaler-1.9.190.dist-info → supervertaler-1.9.198.dist-info}/top_level.txt +0 -0
modules/keyboard_shortcuts_widget.py
CHANGED
@@ -5,16 +5,85 @@ Provides UI for viewing, editing, and managing keyboard shortcuts
 
 from pathlib import Path
 from PyQt6.QtWidgets import (
-    QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QTableWidget,
-    QTableWidgetItem, QHeaderView, QLineEdit, QLabel, QDialog,
-    QDialogButtonBox, QMessageBox, QFileDialog, QGroupBox, QCheckBox
+    QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QTableWidget,
+    QTableWidgetItem, QHeaderView, QLineEdit, QLabel, QDialog,
+    QDialogButtonBox, QMessageBox, QFileDialog, QGroupBox, QCheckBox,
+    QStyleOptionButton
 )
-from PyQt6.QtCore import Qt, QEvent
-from PyQt6.QtGui import QKeySequence, QKeyEvent, QFont
+from PyQt6.QtCore import Qt, QEvent, QPointF, QRect
+from PyQt6.QtGui import QKeySequence, QKeyEvent, QFont, QPainter, QPen, QColor
 
 from modules.shortcut_manager import ShortcutManager
 
 
+class CheckmarkCheckBox(QCheckBox):
+    """Custom checkbox with green background and white checkmark when checked"""
+
+    def __init__(self, text="", parent=None):
+        super().__init__(text, parent)
+        self.setCheckable(True)
+        self.setEnabled(True)
+        self.setStyleSheet("""
+            QCheckBox {
+                font-size: 9pt;
+                spacing: 6px;
+            }
+            QCheckBox::indicator {
+                width: 16px;
+                height: 16px;
+                border: 2px solid #999;
+                border-radius: 3px;
+                background-color: white;
+            }
+            QCheckBox::indicator:checked {
+                background-color: #4CAF50;
+                border-color: #4CAF50;
+            }
+            QCheckBox::indicator:hover {
+                border-color: #666;
+            }
+            QCheckBox::indicator:checked:hover {
+                background-color: #45a049;
+                border-color: #45a049;
+            }
+        """)
+
+    def paintEvent(self, event):
+        """Override paint event to draw white checkmark when checked"""
+        super().paintEvent(event)
+
+        if self.isChecked():
+            opt = QStyleOptionButton()
+            self.initStyleOption(opt)
+            indicator_rect = self.style().subElementRect(
+                self.style().SubElement.SE_CheckBoxIndicator,
+                opt,
+                self
+            )
+
+            if indicator_rect.isValid():
+                # Draw white checkmark
+                painter = QPainter(self)
+                painter.setRenderHint(QPainter.RenderHint.Antialiasing)
+                pen_width = max(2.0, min(indicator_rect.width(), indicator_rect.height()) * 0.12)
+                painter.setPen(QPen(QColor(255, 255, 255), pen_width, Qt.PenStyle.SolidLine, Qt.PenCapStyle.RoundCap, Qt.PenJoinStyle.RoundJoin))
+
+                # Draw checkmark (✓ shape)
+                x = indicator_rect.x()
+                y = indicator_rect.y()
+                w = indicator_rect.width()
+                h = indicator_rect.height()
+
+                # Checkmark coordinates (relative to indicator)
+                p1 = QPointF(x + w * 0.20, y + h * 0.50)  # Start left
+                p2 = QPointF(x + w * 0.40, y + h * 0.70)  # Bottom middle
+                p3 = QPointF(x + w * 0.80, y + h * 0.30)  # End right-top
+
+                painter.drawLine(p1, p2)
+                painter.drawLine(p2, p3)
+                painter.end()
+
+
 class KeySequenceEdit(QLineEdit):
     """Custom widget for capturing keyboard shortcuts"""
 
@@ -317,10 +386,9 @@ class KeyboardShortcutsWidget(QWidget):
         for shortcut_id, data in sorted(shortcuts, key=lambda x: x[1]["description"]):
             self.table.insertRow(row)
 
-            # Enabled checkbox (column 0)
-            checkbox =
+            # Enabled checkbox (column 0) - using green checkmark style
+            checkbox = CheckmarkCheckBox()
             checkbox.setChecked(data.get("is_enabled", True))
-            checkbox.setStyleSheet("margin-left: 10px;")
             checkbox.setToolTip("Enable or disable this shortcut")
             # Store shortcut_id in checkbox for reference
             checkbox.setProperty("shortcut_id", shortcut_id)
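The second hunk swaps the plain QCheckBox used for the per-shortcut "enabled" column for the new CheckmarkCheckBox. As a rough sketch of the intended usage (the table setup and the shortcut id below are illustrative, not taken from the package):

    # Minimal sketch, assuming CheckmarkCheckBox is importable from the module that defines it.
    import sys
    from PyQt6.QtWidgets import QApplication, QTableWidget
    from modules.keyboard_shortcuts_widget import CheckmarkCheckBox

    app = QApplication(sys.argv)
    table = QTableWidget(1, 2)

    checkbox = CheckmarkCheckBox()                 # green indicator + painted white checkmark
    checkbox.setChecked(True)                      # exercises the custom paintEvent branch
    checkbox.setToolTip("Enable or disable this shortcut")
    checkbox.setProperty("shortcut_id", "copy_source_to_target")  # hypothetical id for illustration

    table.setCellWidget(0, 0, checkbox)            # place the widget directly in the cell
    table.show()
    sys.exit(app.exec())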
modules/llm_clients.py
CHANGED
@@ -594,18 +594,20 @@ class LLMClient:
         context: Optional[str] = None,
         custom_prompt: Optional[str] = None,
         max_tokens: Optional[int] = None,
-        images: Optional[List] = None
+        images: Optional[List] = None,
+        system_prompt: Optional[str] = None
     ) -> str:
         """
         Translate text using configured LLM
-
+
         Args:
             text: Text to translate
             source_lang: Source language code
             target_lang: Target language code
             context: Optional context for translation
             custom_prompt: Optional custom prompt (overrides default simple prompt)
-
+            system_prompt: Optional system prompt for AI behavior context
+
         Returns:
             Translated text
         """
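With the extra parameter in place, callers can supply a behavioural system prompt alongside the text to translate. A hedged sketch of a call site (the LLMClient constructor arguments are assumptions for illustration; only the translate() keywords appear in the diff):

    from modules.llm_clients import LLMClient

    # Constructor signature assumed for this sketch; provider/model/api_key values are illustrative.
    client = LLMClient(provider="openai", model="gpt-4o", api_key="sk-...")

    translation = client.translate(
        text="De kat zit op de mat.",
        source_lang="nl",
        target_lang="en",
        system_prompt="You are a professional nl->en translator. Return only the translation.",
    )
    print(translation)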
@@ -615,30 +617,30 @@ class LLMClient:
         else:
             # Build prompt
             prompt = f"Translate the following text from {source_lang} to {target_lang}:\n\n{text}"
-
+
             if context:
                 prompt = f"Context: {context}\n\n{prompt}"
-
+
         # Log warning if images provided but model doesn't support vision
         if images and not self.model_supports_vision(self.provider, self.model):
             print(f"⚠️ Warning: Model {self.model} doesn't support vision. Images will be ignored.")
             images = None  # Don't pass to API
-
+
         # Call appropriate provider
         if self.provider == "openai":
-            return self._call_openai(prompt, max_tokens=max_tokens, images=images)
+            return self._call_openai(prompt, max_tokens=max_tokens, images=images, system_prompt=system_prompt)
         elif self.provider == "claude":
-            return self._call_claude(prompt, max_tokens=max_tokens, images=images)
+            return self._call_claude(prompt, max_tokens=max_tokens, images=images, system_prompt=system_prompt)
         elif self.provider == "gemini":
-            return self._call_gemini(prompt, max_tokens=max_tokens, images=images)
+            return self._call_gemini(prompt, max_tokens=max_tokens, images=images, system_prompt=system_prompt)
         elif self.provider == "ollama":
-            return self._call_ollama(prompt, max_tokens=max_tokens)
+            return self._call_ollama(prompt, max_tokens=max_tokens, system_prompt=system_prompt)
         else:
             raise ValueError(f"Unsupported provider: {self.provider}")
 
-    def _call_openai(self, prompt: str, max_tokens: Optional[int] = None, images: Optional[List] = None) -> str:
+    def _call_openai(self, prompt: str, max_tokens: Optional[int] = None, images: Optional[List] = None, system_prompt: Optional[str] = None) -> str:
         """Call OpenAI API with GPT-5/o1/o3 reasoning model support and vision capability"""
-        print(f"🔵 _call_openai START: model={self.model}, prompt_len={len(prompt)}, max_tokens={max_tokens}, images={len(images) if images else 0}")
+        print(f"🔵 _call_openai START: model={self.model}, prompt_len={len(prompt)}, max_tokens={max_tokens}, images={len(images) if images else 0}, has_system={bool(system_prompt)}")
 
         try:
             from openai import OpenAI
@@ -686,10 +688,16 @@ class LLMClient:
             # Standard text-only format
             content = prompt
 
+        # Build messages list
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": content})
+
         # Build API call parameters
         api_params = {
             "model": self.model,
-            "messages":
+            "messages": messages,
             "timeout": timeout_seconds
         }
 
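The effect of this block is that an optional system message is prepended to the chat payload before it is handed to the OpenAI client. A simplified stand-alone equivalent (model id and key are placeholders; the real method's timeout and reasoning-model handling are omitted):

    from openai import OpenAI

    client = OpenAI(api_key="sk-...")  # placeholder key

    system_prompt = "You are a translation engine. Output only the target text."
    content = "Translate the following text from nl to en:\n\nGoedemorgen."

    # Same message-building pattern as the diff above
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": content})

    response = client.chat.completions.create(model="gpt-4o", messages=messages)
    print(response.choices[0].message.content)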
@@ -742,7 +750,7 @@ class LLMClient:
            print(f" Response: {e.response}")
            raise  # Re-raise to be caught by calling code
 
-    def _call_claude(self, prompt: str, max_tokens: Optional[int] = None, images: Optional[List] = None) -> str:
+    def _call_claude(self, prompt: str, max_tokens: Optional[int] = None, images: Optional[List] = None, system_prompt: Optional[str] = None) -> str:
         """Call Anthropic Claude API with vision support"""
         try:
             import anthropic
@@ -786,12 +794,19 @@ class LLMClient:
             # Standard text-only format
             content = prompt
 
-
-
-
-
-
-
+        # Build API call parameters
+        api_params = {
+            "model": self.model,
+            "max_tokens": tokens_to_use,
+            "messages": [{"role": "user", "content": content}],
+            "timeout": timeout_seconds  # Explicit timeout
+        }
+
+        # Add system prompt if provided (Claude uses 'system' parameter, not a message)
+        if system_prompt:
+            api_params["system"] = system_prompt
+
+        response = client.messages.create(**api_params)
 
         translation = response.content[0].text.strip()
 
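Unlike the OpenAI path, Claude receives the system prompt as a top-level system argument to messages.create() rather than as a {"role": "system"} message. A simplified stand-alone equivalent (model id and key are placeholders):

    import anthropic

    client = anthropic.Anthropic(api_key="sk-ant-...")  # placeholder key

    response = client.messages.create(
        model="claude-3-5-sonnet-20241022",  # illustrative model id
        max_tokens=1024,
        system="You are a translation engine. Output only the target text.",
        messages=[{"role": "user", "content": "Translate 'Goedemorgen' from nl to en."}],
    )
    print(response.content[0].text.strip())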
@@ -800,7 +815,7 @@ class LLMClient:
 
         return translation
 
-    def _call_gemini(self, prompt: str, max_tokens: Optional[int] = None, images: Optional[List] = None) -> str:
+    def _call_gemini(self, prompt: str, max_tokens: Optional[int] = None, images: Optional[List] = None, system_prompt: Optional[str] = None) -> str:
         """Call Google Gemini API with vision support"""
         try:
             import google.generativeai as genai
@@ -809,10 +824,15 @@ class LLMClient:
             raise ImportError(
                 "Google AI library not installed. Install with: pip install google-generativeai pillow"
             )
-
+
         genai.configure(api_key=self.api_key)
-
-
+
+        # Gemini supports system instructions via GenerativeModel parameter
+        if system_prompt:
+            model = genai.GenerativeModel(self.model, system_instruction=system_prompt)
+        else:
+            model = genai.GenerativeModel(self.model)
+
         # Build content (text + optional images)
         if images:
             # Gemini format: list with prompt text followed by PIL Image objects
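For Gemini the system prompt is attached to the model itself via system_instruction when the GenerativeModel is constructed, and the user prompt is sent separately. A simplified stand-alone equivalent (model id and key are placeholders):

    import google.generativeai as genai

    genai.configure(api_key="AIza...")  # placeholder key

    model = genai.GenerativeModel(
        "gemini-1.5-flash",  # illustrative model id
        system_instruction="You are a translation engine. Output only the target text.",
    )
    response = model.generate_content("Translate 'Goedemorgen' from nl to en.")
    print(response.text.strip())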
@@ -823,7 +843,7 @@ class LLMClient:
         else:
             # Standard text-only
             content = prompt
-
+
         response = model.generate_content(content)
         translation = response.text.strip()
 
@@ -832,20 +852,21 @@ class LLMClient:
 
         return translation
 
-    def _call_ollama(self, prompt: str, max_tokens: Optional[int] = None) -> str:
+    def _call_ollama(self, prompt: str, max_tokens: Optional[int] = None, system_prompt: Optional[str] = None) -> str:
         """
         Call local Ollama server for translation.
-
+
         Ollama provides a simple REST API compatible with local LLM inference.
         Models run entirely on the user's computer - no API keys, no internet required.
-
+
         Args:
             prompt: The full prompt to send
             max_tokens: Maximum tokens to generate (default: 4096)
-
+            system_prompt: Optional system prompt for AI behavior context
+
         Returns:
             Translated text
-
+
         Raises:
             ConnectionError: If Ollama is not running
             ValueError: If model is not available
@@ -866,13 +887,17 @@ class LLMClient:
         print(f"🟠 _call_ollama START: model={self.model}, prompt_len={len(prompt)}, max_tokens={tokens_to_use}")
         print(f"🟠 Ollama endpoint: {endpoint}")
 
+        # Build messages list
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": prompt})
+
         # Build request payload
         # Using /api/chat for chat-style interaction (better for translation prompts)
         payload = {
             "model": self.model,
-            "messages":
-                {"role": "user", "content": prompt}
-            ],
+            "messages": messages,
             "stream": False,  # Get complete response at once
             "options": {
                 "temperature": 0.3,  # Low temperature for consistent translations