supervertaler-1.9.163-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- Supervertaler.py +48473 -0
- modules/__init__.py +10 -0
- modules/ai_actions.py +964 -0
- modules/ai_attachment_manager.py +343 -0
- modules/ai_file_viewer_dialog.py +210 -0
- modules/autofingers_engine.py +466 -0
- modules/cafetran_docx_handler.py +379 -0
- modules/config_manager.py +469 -0
- modules/database_manager.py +1911 -0
- modules/database_migrations.py +417 -0
- modules/dejavurtf_handler.py +779 -0
- modules/document_analyzer.py +427 -0
- modules/docx_handler.py +689 -0
- modules/encoding_repair.py +319 -0
- modules/encoding_repair_Qt.py +393 -0
- modules/encoding_repair_ui.py +481 -0
- modules/feature_manager.py +350 -0
- modules/figure_context_manager.py +340 -0
- modules/file_dialog_helper.py +148 -0
- modules/find_replace.py +164 -0
- modules/find_replace_qt.py +457 -0
- modules/glossary_manager.py +433 -0
- modules/image_extractor.py +188 -0
- modules/keyboard_shortcuts_widget.py +571 -0
- modules/llm_clients.py +1211 -0
- modules/llm_leaderboard.py +737 -0
- modules/llm_superbench_ui.py +1401 -0
- modules/local_llm_setup.py +1104 -0
- modules/model_update_dialog.py +381 -0
- modules/model_version_checker.py +373 -0
- modules/mqxliff_handler.py +638 -0
- modules/non_translatables_manager.py +743 -0
- modules/pdf_rescue_Qt.py +1822 -0
- modules/pdf_rescue_tkinter.py +909 -0
- modules/phrase_docx_handler.py +516 -0
- modules/project_home_panel.py +209 -0
- modules/prompt_assistant.py +357 -0
- modules/prompt_library.py +689 -0
- modules/prompt_library_migration.py +447 -0
- modules/quick_access_sidebar.py +282 -0
- modules/ribbon_widget.py +597 -0
- modules/sdlppx_handler.py +874 -0
- modules/setup_wizard.py +353 -0
- modules/shortcut_manager.py +932 -0
- modules/simple_segmenter.py +128 -0
- modules/spellcheck_manager.py +727 -0
- modules/statuses.py +207 -0
- modules/style_guide_manager.py +315 -0
- modules/superbench_ui.py +1319 -0
- modules/superbrowser.py +329 -0
- modules/supercleaner.py +600 -0
- modules/supercleaner_ui.py +444 -0
- modules/superdocs.py +19 -0
- modules/superdocs_viewer_qt.py +382 -0
- modules/superlookup.py +252 -0
- modules/tag_cleaner.py +260 -0
- modules/tag_manager.py +351 -0
- modules/term_extractor.py +270 -0
- modules/termbase_entry_editor.py +842 -0
- modules/termbase_import_export.py +488 -0
- modules/termbase_manager.py +1060 -0
- modules/termview_widget.py +1176 -0
- modules/theme_manager.py +499 -0
- modules/tm_editor_dialog.py +99 -0
- modules/tm_manager_qt.py +1280 -0
- modules/tm_metadata_manager.py +545 -0
- modules/tmx_editor.py +1461 -0
- modules/tmx_editor_qt.py +2784 -0
- modules/tmx_generator.py +284 -0
- modules/tracked_changes.py +900 -0
- modules/trados_docx_handler.py +430 -0
- modules/translation_memory.py +715 -0
- modules/translation_results_panel.py +2134 -0
- modules/translation_services.py +282 -0
- modules/unified_prompt_library.py +659 -0
- modules/unified_prompt_manager_qt.py +3951 -0
- modules/voice_commands.py +920 -0
- modules/voice_dictation.py +477 -0
- modules/voice_dictation_lite.py +249 -0
- supervertaler-1.9.163.dist-info/METADATA +906 -0
- supervertaler-1.9.163.dist-info/RECORD +85 -0
- supervertaler-1.9.163.dist-info/WHEEL +5 -0
- supervertaler-1.9.163.dist-info/entry_points.txt +2 -0
- supervertaler-1.9.163.dist-info/licenses/LICENSE +21 -0
- supervertaler-1.9.163.dist-info/top_level.txt +2 -0
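
To check this listing against the artifact itself, the wheel can be inspected locally. A minimal sketch using only the Python standard library; the filename is taken from the header above:

from zipfile import ZipFile

# List every file in the wheel with its uncompressed size.
with ZipFile("supervertaler-1.9.163-py3-none-any.whl") as whl:
    for info in whl.infolist():
        print(f"{info.file_size:>9}  {info.filename}")
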
--- /dev/null
+++ b/modules/project_home_panel.py
@@ -0,0 +1,209 @@
+"""
+Project Home Panel - Collapsible sidebar like memoQ's Project Home
+"""
+
+from PyQt6.QtWidgets import (
+    QWidget, QVBoxLayout, QHBoxLayout, QPushButton, QLabel,
+    QFrame, QListWidget, QListWidgetItem, QScrollArea
+)
+from PyQt6.QtCore import Qt, pyqtSignal, QPropertyAnimation, QRect, QEasingCurve
+from PyQt6.QtGui import QColor, QFont, QIcon
+
+
+class ProjectHomeItem(QWidget):
+    """Individual item in the Project Home panel"""
+
+    clicked = pyqtSignal(str)
+
+    def __init__(self, icon: str, text: str, item_id: str, parent=None):
+        super().__init__(parent)
+        self.item_id = item_id
+
+        layout = QHBoxLayout(self)
+        layout.setContentsMargins(12, 8, 12, 8)
+        layout.setSpacing(10)
+
+        # Icon/emoji
+        icon_label = QLabel(icon)
+        icon_label.setStyleSheet("font-size: 18px; background: transparent;")
+        icon_label.setMaximumWidth(30)
+        layout.addWidget(icon_label)
+
+        # Text
+        text_label = QLabel(text)
+        text_label.setStyleSheet("""
+            QLabel {
+                color: #2c3e50;
+                background: transparent;
+                font-size: 11px;
+                font-weight: 500;
+            }
+        """)
+        layout.addWidget(text_label, stretch=1)
+
+        # Make the whole widget clickable
+        self.setCursor(Qt.CursorShape.PointingHandCursor)
+        self.setStyleSheet("""
+            ProjectHomeItem {
+                background: transparent;
+                border-radius: 4px;
+            }
+            ProjectHomeItem:hover {
+                background: rgba(0, 0, 0, 0.05);
+            }
+        """)
+
+    def mousePressEvent(self, event):
+        """Emit signal when clicked"""
+        self.clicked.emit(self.item_id)
+
+
+class ProjectHomePanel(QWidget):
+    """Collapsible Project Home panel similar to memoQ"""
+
+    # Signals
+    item_selected = pyqtSignal(str)  # Emits item ID
+
+    def __init__(self, parent=None):
+        super().__init__(parent)
+        self.is_expanded = False
+        self.setup_ui()
+
+    def setup_ui(self):
+        """Setup the UI"""
+        main_layout = QVBoxLayout(self)
+        main_layout.setContentsMargins(0, 0, 0, 0)
+        main_layout.setSpacing(0)
+
+        # Header with tab
+        header = QFrame()
+        header.setStyleSheet("""
+            QFrame {
+                background: #f0f0f0;
+                border-right: 1px solid #cccccc;
+            }
+        """)
+        header_layout = QVBoxLayout(header)
+        header_layout.setContentsMargins(0, 0, 0, 0)
+        header_layout.setSpacing(0)
+
+        # Tab button (left edge)
+        tab_btn = QPushButton("PROJECT\nHOME")
+        tab_btn.setMaximumWidth(60)
+        tab_btn.setMinimumHeight(80)
+        tab_btn.setStyleSheet("""
+            QPushButton {
+                background: #e8e8e8;
+                border: 1px solid #cccccc;
+                border-right: none;
+                color: #333333;
+                font-size: 9px;
+                font-weight: bold;
+                padding: 8px 4px;
+            }
+            QPushButton:hover {
+                background: #f5f5f5;
+            }
+            QPushButton:pressed {
+                background: #e0e0e0;
+            }
+        """)
+        tab_btn.clicked.connect(self.toggle_panel)
+        header_layout.addWidget(tab_btn)
+        header_layout.addStretch()
+
+        main_layout.addWidget(header)
+
+        # Panel content (initially hidden)
+        content = QFrame()
+        content.setMaximumWidth(0)  # Start collapsed
+        content.setStyleSheet("""
+            QFrame {
+                background: white;
+                border-right: 1px solid #cccccc;
+            }
+        """)
+        self.content = content
+
+        content_layout = QVBoxLayout(content)
+        content_layout.setContentsMargins(0, 0, 0, 0)
+        content_layout.setSpacing(0)
+
+        # Scroll area for items
+        scroll = QScrollArea()
+        scroll.setWidgetResizable(True)
+        scroll.setStyleSheet("""
+            QScrollArea {
+                background: white;
+                border: none;
+            }
+            QScrollBar:vertical {
+                background: white;
+                width: 12px;
+            }
+            QScrollBar::handle:vertical {
+                background: #cccccc;
+                border-radius: 6px;
+                min-height: 20px;
+            }
+            QScrollBar::handle:vertical:hover {
+                background: #999999;
+            }
+        """)
+
+        # Container for items
+        items_container = QWidget()
+        items_layout = QVBoxLayout(items_container)
+        items_layout.setContentsMargins(0, 0, 0, 0)
+        items_layout.setSpacing(1)
+
+        # Add menu items
+        menu_items = [
+            ("📋", "Overview", "overview"),
+            ("📁", "Translations", "translations"),
+            ("📚", "Live Docs", "live_docs"),
+            ("🔄", "Translation Memories", "memories"),
+            ("🏷️", "Termbases", "termbases"),
+            ("🤖", "Muses", "muses"),
+            ("⚙️", "Settings", "settings"),
+        ]
+
+        self.items_map = {}
+        for icon, text, item_id in menu_items:
+            item = ProjectHomeItem(icon, text, item_id)
+            item.clicked.connect(self.on_item_clicked)
+            self.items_map[item_id] = item
+            items_layout.addWidget(item)
+
+        items_layout.addStretch()
+        scroll.setWidget(items_container)
+        content_layout.addWidget(scroll)
+
+        main_layout.addWidget(content)
+
+        # Animation for expand/collapse
+        self.animation = QPropertyAnimation(self.content, b"maximumWidth")
+        self.animation.setDuration(300)
+        self.animation.setEasingCurve(QEasingCurve.Type.InOutQuad)
+
+    def toggle_panel(self):
+        """Toggle panel expanded/collapsed"""
+        self.is_expanded = not self.is_expanded
+
+        if self.is_expanded:
+            # Expand
+            self.animation.setEndValue(250)  # Width when expanded
+        else:
+            # Collapse
+            self.animation.setEndValue(0)  # Width when collapsed
+
+        self.animation.start()
+
+    def on_item_clicked(self, item_id: str):
+        """Handle item click"""
+        self.item_selected.emit(item_id)
+
+    def setMaximumWidth(self, width: int):
+        """Override to allow animation"""
+        super().setMaximumWidth(width)
+        self.content.setMaximumWidth(width - 60)  # Account for tab width
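
For orientation, a minimal sketch of how this panel might be embedded in a host window, based only on the code above. The MainWindow wiring below is illustrative, not part of the package, and assumes the wheel's modules package is importable:

import sys

from PyQt6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QWidget

from modules.project_home_panel import ProjectHomePanel


class MainWindow(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("ProjectHomePanel demo")

        central = QWidget()
        layout = QHBoxLayout(central)
        layout.setContentsMargins(0, 0, 0, 0)

        # The panel starts collapsed; clicking its PROJECT HOME tab animates it open.
        self.panel = ProjectHomePanel()
        self.panel.item_selected.connect(self.show_section)
        layout.addWidget(self.panel)

        # Stand-in for the editor area the sidebar would sit beside.
        self.editor = QLabel("Editor area")
        layout.addWidget(self.editor, stretch=1)

        self.setCentralWidget(central)

    def show_section(self, item_id: str):
        # item_id is one of the IDs from menu_items, e.g. "overview" or "termbases".
        self.editor.setText(f"Selected section: {item_id}")


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec())
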
--- /dev/null
+++ b/modules/prompt_assistant.py
@@ -0,0 +1,357 @@
+"""
+AI Prompt Assistant Module
+
+Provides AI-powered prompt modification through natural language conversation.
+Part of Phase 1 implementation for v4.0.0-beta.
+
+Features:
+- Conversational prompt modification
+- Visual diff generation
+- Prompt versioning
+- Chat history tracking
+"""
+
+import difflib
+import json
+from datetime import datetime
+from typing import Dict, List, Optional, Tuple
+
+
+class PromptAssistant:
+    """AI-powered prompt modification and learning system"""
+
+    def __init__(self, llm_client=None):
+        """
+        Initialize the Prompt Assistant.
+
+        Args:
+            llm_client: LLM client instance (OpenAI, Anthropic, or Google)
+        """
+        self.llm = llm_client
+        self.chat_history = []
+        self.modification_history = []  # Track all prompt versions
+
+        # System prompt for the AI prompt engineer
+        self.system_prompt = """You are an expert prompt engineer specialising in translation and localisation prompts.
+
+Your role is to help users refine and optimise their translation prompts based on their specific needs.
+
+When a user requests a modification to a prompt:
+1. Analyze the current prompt structure and content
+2. Understand the user's specific request
+3. Make targeted, meaningful changes that improve the prompt
+4. Explain your modifications clearly
+5. Preserve the overall structure unless changes are needed
+
+Guidelines:
+- Be specific and precise in your modifications
+- Maintain professional translation terminology
+- Consider linguistic and cultural aspects
+- Keep prompts concise but comprehensive
+- Use clear, actionable language
+
+Always respond with:
+1. An explanation of what you're changing and why
+2. The complete modified prompt text
+3. Key improvements made
+
+Format your response as JSON:
+{
+    "explanation": "Brief explanation of changes",
+    "modified_prompt": "Complete new prompt text",
+    "changes_summary": ["Change 1", "Change 2", ...]
+}"""
+
+    def set_llm_client(self, llm_client):
+        """Set or update the LLM client"""
+        self.llm = llm_client
+
+    def send_message(self, system_prompt: str, user_message: str, callback=None) -> Optional[str]:
+        """
+        Send a message to the LLM and get a response (for Style Guides chat).
+
+        Args:
+            system_prompt: System prompt to use for context
+            user_message: User's message
+            callback: Optional callback function to handle response (called with response text)
+
+        Returns:
+            Response text or None if LLM not available
+        """
+        if not self.llm:
+            response = """I'm currently running in offline mode without LLM access.
+
+For now, you can use these commands:
+- 'add to all: [text]' to add rules to all languages
+- 'add to [Language]: [text]' to add rules to a specific language
+- 'show' to list available languages
+
+When you have your LLM configured (OpenAI, Claude, or Gemini),
+I'll be able to provide AI-powered suggestions for your style guides."""
+            if callback:
+                callback(response)
+            return response
+
+        try:
+            # Build messages
+            messages = [
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_message}
+            ]
+
+            # Call LLM through self.llm (which should be an LLM client)
+            # This is designed to work with OpenAI, Anthropic, or Google clients
+            response_text = None
+
+            # Try different LLM client interfaces
+            if hasattr(self.llm, 'chat'):
+                # OpenAI-style interface
+                response_text = self.llm.chat(messages)
+            elif hasattr(self.llm, 'generate_content'):
+                # Google Gemini-style interface
+                response_text = self.llm.generate_content(f"{system_prompt}\n\nUser: {user_message}").text
+            elif hasattr(self.llm, 'messages'):
+                # Anthropic-style interface
+                response = self.llm.messages.create(
+                    model="claude-opus-4-1",
+                    max_tokens=1024,
+                    system=system_prompt,
+                    messages=[{"role": "user", "content": user_message}]
+                )
+                response_text = response.content[0].text
+            else:
+                response_text = "LLM client not properly configured"
+
+            # Record in chat history
+            self.chat_history.append({
+                "role": "user",
+                "content": user_message,
+                "timestamp": datetime.now().isoformat()
+            })
+            self.chat_history.append({
+                "role": "assistant",
+                "content": response_text,
+                "timestamp": datetime.now().isoformat()
+            })
+
+            # Call callback if provided
+            if callback:
+                callback(response_text)
+
+            return response_text
+
+        except Exception as e:
+            error_msg = f"Error communicating with LLM: {str(e)}"
+            if callback:
+                callback(error_msg)
+            return error_msg
+
+
+    def suggest_modification(self, prompt_name: str, current_prompt: str, user_request: str) -> Dict:
+        """
+        AI suggests changes to a prompt based on user request.
+
+        Args:
+            prompt_name: Name of the prompt being modified
+            current_prompt: Current content of the prompt
+            user_request: User's natural language request for modification
+
+        Returns:
+            Dictionary containing:
+            - explanation: Why the changes were made
+            - modified_prompt: New version of the prompt
+            - changes_summary: List of key changes
+            - diff: Unified diff showing changes
+            - success: Boolean indicating if modification succeeded
+        """
+        if not self.llm:
+            return {
+                "success": False,
+                "error": "No LLM client configured"
+            }
+
+        try:
+            # Build conversation context
+            messages = [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": f"""Please modify the following translation prompt:
+
+PROMPT NAME: {prompt_name}
+
+CURRENT PROMPT:
+{current_prompt}
+
+USER REQUEST: {user_request}
+
+Provide your response in JSON format with explanation, modified_prompt, and changes_summary."""}
+            ]
+
+            # Get AI response
+            response = self.llm.chat(messages)
+
+            # Parse JSON response
+            try:
+                result = json.loads(response)
+            except json.JSONDecodeError:
+                # Try to extract JSON from markdown code blocks
+                if "```json" in response:
+                    json_text = response.split("```json")[1].split("```")[0].strip()
+                    result = json.loads(json_text)
+                elif "```" in response:
+                    json_text = response.split("```")[1].split("```")[0].strip()
+                    result = json.loads(json_text)
+                else:
+                    # Fallback: treat entire response as modified prompt
+                    result = {
+                        "explanation": "Modified based on your request",
+                        "modified_prompt": response,
+                        "changes_summary": ["Prompt updated"]
+                    }
+
+            # Generate diff
+            diff = self.generate_diff(current_prompt, result["modified_prompt"])
+
+            # Record in modification history
+            self.modification_history.append({
+                "timestamp": datetime.now().isoformat(),
+                "prompt_name": prompt_name,
+                "original": current_prompt,
+                "modified": result["modified_prompt"],
+                "request": user_request,
+                "explanation": result.get("explanation", "")
+            })
+
+            # Record in chat history
+            self.chat_history.append({
+                "role": "user",
+                "content": user_request,
+                "timestamp": datetime.now().isoformat()
+            })
+            self.chat_history.append({
+                "role": "assistant",
+                "content": result.get("explanation", "Prompt modified"),
+                "timestamp": datetime.now().isoformat(),
+                "has_suggestion": True
+            })
+
+            return {
+                "success": True,
+                "explanation": result.get("explanation", ""),
+                "modified_prompt": result["modified_prompt"],
+                "changes_summary": result.get("changes_summary", []),
+                "diff": diff,
+                "diff_html": self.generate_diff_html(current_prompt, result["modified_prompt"])
+            }
+
+        except Exception as e:
+            return {
+                "success": False,
+                "error": f"Failed to generate modification: {str(e)}"
+            }
+
+    def generate_diff(self, original: str, modified: str) -> str:
+        """
+        Generate unified diff between two prompts.
+
+        Args:
+            original: Original prompt text
+            modified: Modified prompt text
+
+        Returns:
+            Unified diff string
+        """
+        original_lines = original.splitlines(keepends=True)
+        modified_lines = modified.splitlines(keepends=True)
+
+        diff = difflib.unified_diff(
+            original_lines,
+            modified_lines,
+            fromfile="Original",
+            tofile="Modified",
+            lineterm=""
+        )
+
+        return ''.join(diff)
+
+    def generate_diff_html(self, original: str, modified: str) -> List[Tuple[str, str]]:
+        """
+        Generate line-by-line diff suitable for colored display.
+
+        Args:
+            original: Original prompt text
+            modified: Modified prompt text
+
+        Returns:
+            List of tuples (change_type, line) where change_type is:
+            - 'equal': unchanged line
+            - 'delete': removed line
+            - 'insert': added line
+            - 'replace': modified line
+        """
+        original_lines = original.splitlines()
+        modified_lines = modified.splitlines()
+
+        diff = []
+        matcher = difflib.SequenceMatcher(None, original_lines, modified_lines)
+
+        for tag, i1, i2, j1, j2 in matcher.get_opcodes():
+            if tag == 'equal':
+                for line in original_lines[i1:i2]:
+                    diff.append(('equal', line))
+            elif tag == 'delete':
+                for line in original_lines[i1:i2]:
+                    diff.append(('delete', line))
+            elif tag == 'insert':
+                for line in modified_lines[j1:j2]:
+                    diff.append(('insert', line))
+            elif tag == 'replace':
+                for line in original_lines[i1:i2]:
+                    diff.append(('delete', line))
+                for line in modified_lines[j1:j2]:
+                    diff.append(('insert', line))
+
+        return diff
+
+    def get_chat_history(self) -> List[Dict]:
+        """Get the chat history"""
+        return self.chat_history
+
+    def clear_chat_history(self):
+        """Clear the chat history"""
+        self.chat_history = []
+
+    def get_modification_history(self) -> List[Dict]:
+        """Get the modification history"""
+        return self.modification_history
+
+    def undo_last_modification(self) -> Optional[Dict]:
+        """
+        Get the previous version of the prompt for undo functionality.
+
+        Returns:
+            Dictionary with original prompt info, or None if no history
+        """
+        if not self.modification_history:
+            return None
+
+        last_mod = self.modification_history[-1]
+        return {
+            "prompt_name": last_mod["prompt_name"],
+            "original_prompt": last_mod["original"],
+            "timestamp": last_mod["timestamp"]
+        }
+
+    def export_chat_session(self, filepath: str):
+        """
+        Export chat history and modifications to a file.
+
+        Args:
+            filepath: Path to save the session data
+        """
+        session_data = {
+            "chat_history": self.chat_history,
+            "modification_history": self.modification_history,
+            "exported_at": datetime.now().isoformat()
+        }
+
+        with open(filepath, 'w', encoding='utf-8') as f:
+            json.dump(session_data, f, indent=2, ensure_ascii=False)