thonny-codemate 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- thonny_codemate-0.1.0.dist-info/METADATA +307 -0
- thonny_codemate-0.1.0.dist-info/RECORD +27 -0
- thonny_codemate-0.1.0.dist-info/WHEEL +5 -0
- thonny_codemate-0.1.0.dist-info/licenses/LICENSE +21 -0
- thonny_codemate-0.1.0.dist-info/top_level.txt +1 -0
- thonnycontrib/__init__.py +1 -0
- thonnycontrib/thonny_codemate/__init__.py +397 -0
- thonnycontrib/thonny_codemate/api.py +154 -0
- thonnycontrib/thonny_codemate/context_manager.py +296 -0
- thonnycontrib/thonny_codemate/external_providers.py +714 -0
- thonnycontrib/thonny_codemate/i18n.py +506 -0
- thonnycontrib/thonny_codemate/llm_client.py +841 -0
- thonnycontrib/thonny_codemate/message_virtualization.py +136 -0
- thonnycontrib/thonny_codemate/model_manager.py +515 -0
- thonnycontrib/thonny_codemate/performance_monitor.py +141 -0
- thonnycontrib/thonny_codemate/prompts.py +102 -0
- thonnycontrib/thonny_codemate/ui/__init__.py +1 -0
- thonnycontrib/thonny_codemate/ui/chat_view.py +687 -0
- thonnycontrib/thonny_codemate/ui/chat_view_html.py +1299 -0
- thonnycontrib/thonny_codemate/ui/custom_prompt_dialog.py +175 -0
- thonnycontrib/thonny_codemate/ui/markdown_renderer.py +484 -0
- thonnycontrib/thonny_codemate/ui/model_download_dialog.py +355 -0
- thonnycontrib/thonny_codemate/ui/settings_dialog.py +1218 -0
- thonnycontrib/thonny_codemate/utils/__init__.py +25 -0
- thonnycontrib/thonny_codemate/utils/constants.py +138 -0
- thonnycontrib/thonny_codemate/utils/error_messages.py +92 -0
- thonnycontrib/thonny_codemate/utils/unified_error_handler.py +310 -0
@@ -0,0 +1,506 @@
|
|
1
|
+
"""
|
2
|
+
国際化(i18n)サポート
|
3
|
+
英語、日本語、中国語(簡体字・繁体字)をサポート
|
4
|
+
"""
|
5
|
+
from typing import Dict
|
6
|
+
from thonny import get_workbench
|
7
|
+
|
8
|
+
|
9
|
+
# 翻訳辞書
|
10
|
+
# Translation table: maps English source strings to their localized forms.
# Supported target languages: Japanese ("ja"), Simplified Chinese ("zh_CN"),
# Traditional Chinese ("zh_TW"). English is the identity mapping and has no entry.
# NOTE: the original table defined "Presets:" twice per language (identical
# values); the duplicates were removed — in a dict display the last key wins,
# so behavior is unchanged.
TRANSLATIONS: Dict[str, Dict[str, str]] = {
    # Japanese
    "ja": {
        # UI elements
        "LLM Assistant": "LLMアシスタント",
        "Show LLM Assistant": "LLMアシスタントを表示",
        "Explain Selected Code": "選択したコードを説明",
        "Generate Code from Comment": "コメントからコードを生成",
        "AI: Explain Selected Code": "AI: 選択したコードを説明",
        "AI: Generate Code from Comment": "AI: コメントからコードを生成",
        "Clear": "クリア",
        "Send": "送信",
        "Stop": "停止",
        "Stopping...": "停止中...",
        "Ctrl+Enter to send": "Ctrl+Enterで送信",
        "Explain Error": "エラーを説明",
        "Include Context": "コンテキストを含める",
        "Settings": "設定",

        # Settings dialog
        "LLM Assistant Settings": "LLMアシスタント設定",
        "Model Settings": "モデル設定",
        "Model Path:": "モデルパス:",
        "Browse...": "参照...",
        "Context Size:": "コンテキストサイズ:",
        "Generation Settings": "生成設定",
        "Temperature:": "温度(創造性):",
        "Max Tokens:": "出力最大トークン数:",
        "User Settings": "ユーザー設定",
        "Skill Level:": "スキルレベル:",
        "Use Markdown View:": "Markdownビューを使用:",
        "Enable Markdown rendering (requires tkinterweb)": "Markdownレンダリングを有効化(tkinterwebが必要)",
        "Output Language:": "出力言語:",
        "Auto (Follow Thonny)": "自動(Thonnyに従う)",
        "Language code:": "言語コード:",
        "Provider Settings": "プロバイダー設定",
        "Provider:": "LLMプロバイダー:",
        "API Key:": "APIキー:",
        "Base URL:": "ベースURL:",
        "Model Name:": "モデル名:",
        "System Prompt": "システムプロンプト",
        "Prompt Type:": "プロンプトタイプ:",
        "Default": "デフォルト",
        "Custom": "カスタム",
        "Edit Custom Prompt": "カスタムプロンプトを編集",
        "Save": "保存",
        "Cancel": "キャンセル",
        "Test Model": "モデルをテスト",
        "Test": "テスト",
        "Download Models": "モデルをダウンロード",
        "Test Connection": "接続テスト",
        "Basic Settings": "基本設定",
        "Advanced Settings": "詳細設定",
        "Repeat Penalty:": "繰り返しペナルティ:",

        # Model download dialog
        "Model Manager": "モデルマネージャー",
        "Recommended Models": "推奨モデル",
        "Refresh": "更新",
        "Size:": "サイズ:",
        "Languages:": "言語:",
        "Installed": "インストール済み",
        "Use This Model": "このモデルを使用",
        "Delete": "削除",
        "Downloading...": "ダウンロード中...",
        "Not installed": "未インストール",
        "Download": "ダウンロード",

        # Messages
        "Loading": "読み込み中",
        "Ready": "準備完了",
        "No model loaded": "モデルが読み込まれていません",
        "Load failed": "読み込み失敗",
        "Previous conversation restored": "前回の会話を復元しました",
        "Context enabled for selected text": "選択テキストのコンテキストを有効化",
        "Context enabled for current file": "現在のファイルのコンテキストを有効化",
        "Context enabled but no file is currently open": "コンテキストは有効ですが、開いているファイルがありません",
        "Context disabled": "コンテキスト無効",
        "[Generation stopped by user]": "[ユーザーによって生成が停止されました]",
        "Error": "エラー",
        "Success": "成功",
        "Failed to load model:": "モデルの読み込みに失敗:",
        "No model found. Please download a model from Settings → Download Models.": "モデルが見つかりません。設定→モデルをダウンロードからモデルをダウンロードしてください。",
        "Model downloaded successfully!": "モデルのダウンロードが完了しました!",
        "Failed to download": "ダウンロードに失敗しました",
        "Please select a valid model file!": "有効なモデルファイルを選択してください!",
        "Model file found!": "モデルファイルが見つかりました!",
        "API key is required for {}": "{}にはAPIキーが必要です",
        "Testing {} connection...": "{}への接続をテスト中...",
        "Model file does not exist!": "モデルファイルが存在しません!",
        "LLM Busy": "LLM実行中",
        "Please wait for the current generation to complete.": "現在の生成が完了するまでお待ちください。",
        "Generating...": "生成中...",
        "Loading...": "読み込み中...",
        "Testing...": "テスト中...",
        "No Models": "モデルなし",
        "No models found in Ollama. Please pull a model first using 'ollama pull <model>'": "Ollamaにモデルが見つかりません。まず 'ollama pull <model>' でモデルをダウンロードしてください。",
        "Failed to connect to Ollama: {}": "Ollama/LM Studioへの接続に失敗しました: {}",
        "Failed to fetch models: {}": "モデルの取得に失敗しました: {}",
        "Server:": "サーバー:",
        "Host:": "ホスト:",
        "Port:": "ポート:",
        "(File not found)": "(ファイルが見つかりません)",
        "Presets:": "プリセット:",
        "No Editor": "エディタなし",
        "Please open a file in the editor first.": "まずエディタでファイルを開いてください。",
        "Code inserted into editor!": "コードをエディタに挿入しました!",
        "Connected to {} API. Ready to chat!": "{} APIに接続しました。チャット準備完了!",
        "LLM model loaded successfully!": "LLMモデルの読み込みに成功しました!",
        "tkinterweb is not installed": "tkinterwebがインストールされていません",
        "To enable Markdown rendering and interactive features,\nplease install tkinterweb:\n\npip install tkinterweb": "Markdownレンダリングと対話機能を有効にするには、\ntkinterwebをインストールしてください:\n\npip install tkinterweb",

        # Error messages
        "File not found during {}: {}": "{}中にファイルが見つかりません: {}",
        "Permission denied during {}: {}": "{}中にアクセスが拒否されました: {}",
        "Connection failed during {}: {}": "{}中に接続に失敗しました: {}",
        "Operation timed out during {}: {}": "{}中にタイムアウトしました: {}",
        "Invalid value during {}: {}": "{}中に無効な値です: {}",
        "Missing dependency during {}: {}": "{}中に依存関係が不足しています: {}",
        "Error during {}: {}": "{}中にエラーが発生しました: {}",
        "Connection failed. Please check your internet connection or server settings.": "接続に失敗しました。インターネット接続またはサーバー設定を確認してください。",
        "Request timed out. Please try again.": "リクエストがタイムアウトしました。もう一度お試しください。",
        "API key error. Please check your API key in settings.": "APIキーエラー。設定でAPIキーを確認してください。",
        "Model error. Please check if the model is properly loaded.": "モデルエラー。モデルが正しく読み込まれているか確認してください。",
        "LLM module not installed. Please install llama-cpp-python.": "LLMモジュールがインストールされていません。llama-cpp-pythonをインストールしてください。",
        "Failed to initialize LLM": "LLMの初期化に失敗しました",

        # Skill levels
        "beginner": "初心者",
        "intermediate": "中級者",
        "advanced": "上級者",

        # Custom prompt dialog
        "Edit Custom System Prompt": "カスタムシステムプロンプトを編集",
        "Enter your custom system prompt. This will be used to instruct the AI on how to respond.": "カスタムシステムプロンプトを入力してください。これはAIの応答方法を指示するために使用されます。",
        "Tips": "ヒント",
        "• Use {skill_level} to reference the user's skill level": "• {skill_level} でユーザーのスキルレベルを参照",
        "• Use {language} to reference the output language": "• {language} で出力言語を参照",
        "• Be specific about the coding style and explanation depth": "• コーディングスタイルと説明の深さを具体的に指定",
        "• Include examples of how you want the AI to respond": "• AIにどのように応答してほしいかの例を含める",
        "Educational": "教育用",
        "Professional": "プロフェッショナル",
        "Minimal": "最小限",
        "Variables: {skill_level} = 'beginner/intermediate/advanced (with detailed description)', {language} = 'ja/en/zh-CN/zh-TW/auto'": "変数: {skill_level} = '初心者/中級者/上級者(詳細説明付き)', {language} = 'ja/en/zh-CN/zh-TW/auto'",
        "Controls randomness: 0.0 = deterministic, 2.0 = very creative": "ランダム性を制御: 0.0 = 決定的, 2.0 = 非常に創造的",
        "Maximum number of tokens the model can process at once": "モデルが一度に処理できる最大トークン数(記憶の大きさ)。大きいほどメモリ消費量が増え、生成速度が遅くなります",
        "Maximum number of tokens the model will generate in one response": "モデルが1回の応答で生成する最大トークン数",
        "Penalty for repeating tokens: 1.0 = no penalty, 2.0 = strong penalty": "トークン繰り返しのペナルティ: 1.0 = ペナルティなし, 2.0 = 強いペナルティ",
    },

    # Simplified Chinese
    "zh_CN": {
        # UI elements
        "LLM Assistant": "LLM助手",
        "Show LLM Assistant": "显示LLM助手",
        "Explain Selected Code": "解释选中的代码",
        "Generate Code from Comment": "从注释生成代码",
        "AI: Explain Selected Code": "AI: 解释选中的代码",
        "AI: Generate Code from Comment": "AI: 从注释生成代码",
        "Clear": "清除",
        "Send": "发送",
        "Stop": "停止",
        "Stopping...": "正在停止...",
        "Ctrl+Enter to send": "按Ctrl+Enter发送",
        "Explain Error": "解释错误",
        "Include Context": "包含上下文",
        "Settings": "设置",

        # Settings dialog
        "LLM Assistant Settings": "LLM助手设置",
        "Model Settings": "模型设置",
        "Model Path:": "模型路径:",
        "Browse...": "浏览...",
        "Context Size:": "上下文大小:",
        "Generation Settings": "生成设置",
        "Temperature:": "温度(创造性):",
        "Max Tokens:": "输出最大令牌数:",
        "User Settings": "用户设置",
        "Skill Level:": "技能水平:",
        "Use Markdown View:": "使用Markdown视图:",
        "Enable Markdown rendering (requires tkinterweb)": "启用Markdown渲染(需要tkinterweb)",
        "Output Language:": "输出语言:",
        "Auto (Follow Thonny)": "自动(跟随Thonny)",
        "Language code:": "语言代码:",
        "Provider Settings": "提供商设置",
        "Provider:": "LLM提供商:",
        "API Key:": "API密钥:",
        "Base URL:": "基础URL:",
        "Model Name:": "模型名称:",
        "System Prompt": "系统提示",
        "Prompt Type:": "提示类型:",
        "Default": "默认",
        "Custom": "自定义",
        "Edit Custom Prompt": "编辑自定义提示",
        "Save": "保存",
        "Cancel": "取消",
        "Test Model": "测试模型",
        "Test": "测试",
        "Download Models": "下载模型",
        "Test Connection": "测试连接",
        "Basic Settings": "基本设置",
        "Advanced Settings": "高级设置",
        "Repeat Penalty:": "重复惩罚:",

        # Model download dialog
        "Model Manager": "模型管理器",
        "Recommended Models": "推荐模型",
        "Refresh": "刷新",
        "Size:": "大小:",
        "Languages:": "语言:",
        "Installed": "已安装",
        "Use This Model": "使用此模型",
        "Delete": "删除",
        "Downloading...": "正在下载...",
        "Not installed": "未安装",
        "Download": "下载",

        # Messages
        "Loading": "正在加载",
        "Ready": "就绪",
        "No model loaded": "未加载模型",
        "Load failed": "加载失败",
        "Previous conversation restored": "已恢复上次对话",
        "Context enabled for selected text": "已为选定文本启用上下文",
        "Context enabled for current file": "已为当前文件启用上下文",
        "Context enabled but no file is currently open": "上下文已启用但当前没有打开的文件",
        "Context disabled": "上下文已禁用",
        "[Generation stopped by user]": "[用户已停止生成]",
        "Error": "错误",
        "Success": "成功",
        "Failed to load model:": "加载模型失败:",
        "No model found. Please download a model from Settings → Download Models.": "未找到模型。请从设置→下载模型中下载模型。",
        "Model downloaded successfully!": "模型下载成功!",
        "Failed to download": "下载失败",
        "Please select a valid model file!": "请选择有效的模型文件!",
        "Model file found!": "找到模型文件!",
        "API key is required for {}": "{}需要API密钥",
        "Testing {} connection...": "正在测试{}连接...",
        "Model file does not exist!": "模型文件不存在!",
        "LLM Busy": "LLM正在执行",
        "Please wait for the current generation to complete.": "请等待当前生成完成。",
        "Generating...": "正在生成...",
        "Loading...": "正在加载...",
        "Testing...": "正在测试...",
        "No Models": "没有模型",
        "No models found in Ollama. Please pull a model first using 'ollama pull <model>'": "在Ollama中未找到模型。请先使用 'ollama pull <model>' 下载模型。",
        "Failed to connect to Ollama: {}": "连接到Ollama/LM Studio失败: {}",
        "Failed to fetch models: {}": "获取模型失败: {}",
        "Server:": "服务器:",
        "Host:": "主机:",
        "Port:": "端口:",
        "(File not found)": "(文件未找到)",
        "Presets:": "预设:",
        "No Editor": "无编辑器",
        "Please open a file in the editor first.": "请先在编辑器中打开文件。",
        "Code inserted into editor!": "代码已插入编辑器!",
        "Connected to {} API. Ready to chat!": "已连接到 {} API。准备聊天!",
        "LLM model loaded successfully!": "LLM模型加载成功!",
        "tkinterweb is not installed": "未安装tkinterweb",
        "To enable Markdown rendering and interactive features,\nplease install tkinterweb:\n\npip install tkinterweb": "要启用Markdown渲染和交互功能,\n请安装tkinterweb:\n\npip install tkinterweb",

        # Error messages
        "File not found during {}: {}": "{}时未找到文件: {}",
        "Permission denied during {}: {}": "{}时访问被拒绝: {}",
        "Connection failed during {}: {}": "{}时连接失败: {}",
        "Operation timed out during {}: {}": "{}时操作超时: {}",
        "Invalid value during {}: {}": "{}时值无效: {}",
        "Missing dependency during {}: {}": "{}时缺少依赖项: {}",
        "Error during {}: {}": "{}时发生错误: {}",
        "Connection failed. Please check your internet connection or server settings.": "连接失败。请检查您的互联网连接或服务器设置。",
        "Request timed out. Please try again.": "请求超时。请重试。",
        "API key error. Please check your API key in settings.": "API密钥错误。请在设置中检查您的API密钥。",
        "Model error. Please check if the model is properly loaded.": "模型错误。请检查模型是否正确加载。",
        "LLM module not installed. Please install llama-cpp-python.": "未安装LLM模块。请安装llama-cpp-python。",
        "Failed to initialize LLM": "LLM初始化失败",

        # Skill levels
        "beginner": "初学者",
        "intermediate": "中级",
        "advanced": "高级",

        # Custom prompt dialog
        "Edit Custom System Prompt": "编辑自定义系统提示",
        "Enter your custom system prompt. This will be used to instruct the AI on how to respond.": "输入您的自定义系统提示。这将用于指导AI如何回应。",
        "Tips": "提示",
        "• Use {skill_level} to reference the user's skill level": "• 使用 {skill_level} 引用用户的技能水平",
        "• Use {language} to reference the output language": "• 使用 {language} 引用输出语言",
        "• Be specific about the coding style and explanation depth": "• 明确指定编码风格和解释深度",
        "• Include examples of how you want the AI to respond": "• 包含您希望AI如何回应的示例",
        "Educational": "教育用途",
        "Professional": "专业",
        "Minimal": "最小化",
        "Variables: {skill_level} = 'beginner/intermediate/advanced (with detailed description)', {language} = 'ja/en/zh-CN/zh-TW/auto'": "变量: {skill_level} = '初学者/中级/高级(含详细描述)', {language} = 'ja/en/zh-CN/zh-TW/auto'",
        "Controls randomness: 0.0 = deterministic, 2.0 = very creative": "控制随机性: 0.0 = 确定性, 2.0 = 非常有创意",
        "Maximum number of tokens the model can process at once": "模型一次可以处理的最大令牌数(记忆大小)。越大内存消耗越多,生成速度越慢",
        "Maximum number of tokens the model will generate in one response": "模型在一次响应中生成的最大令牌数",
        "Penalty for repeating tokens: 1.0 = no penalty, 2.0 = strong penalty": "重复令牌的惩罚: 1.0 = 无惩罚, 2.0 = 强惩罚",
    },

    # Traditional Chinese
    "zh_TW": {
        # UI elements
        "LLM Assistant": "LLM助手",
        "Show LLM Assistant": "顯示LLM助手",
        "Explain Selected Code": "解釋選取的程式碼",
        "Generate Code from Comment": "從註解生成程式碼",
        "AI: Explain Selected Code": "AI: 解釋選取的程式碼",
        "AI: Generate Code from Comment": "AI: 從註解生成程式碼",
        "Clear": "清除",
        "Send": "發送",
        "Stop": "停止",
        "Stopping...": "正在停止...",
        "Ctrl+Enter to send": "按Ctrl+Enter發送",
        "Explain Error": "解釋錯誤",
        "Include Context": "包含上下文",
        "Settings": "設定",

        # Settings dialog
        "LLM Assistant Settings": "LLM助手設定",
        "Model Settings": "模型設定",
        "Model Path:": "模型路徑:",
        "Browse...": "瀏覽...",
        "Context Size:": "上下文大小:",
        "Generation Settings": "生成設定",
        "Temperature:": "溫度(創造性):",
        "Max Tokens:": "輸出最大令牌數:",
        "User Settings": "使用者設定",
        "Skill Level:": "技能水準:",
        "Use Markdown View:": "使用Markdown檢視:",
        "Enable Markdown rendering (requires tkinterweb)": "啟用Markdown渲染(需要tkinterweb)",
        "Output Language:": "輸出語言:",
        "Auto (Follow Thonny)": "自動(跟隨Thonny)",
        "Language code:": "語言代碼:",
        "Provider Settings": "提供商設定",
        "Provider:": "LLM提供商:",
        "API Key:": "API金鑰:",
        "Base URL:": "基礎URL:",
        "Model Name:": "模型名稱:",
        "System Prompt": "系統提示",
        "Prompt Type:": "提示類型:",
        "Default": "預設",
        "Custom": "自訂",
        "Edit Custom Prompt": "編輯自訂提示",
        "Save": "儲存",
        "Cancel": "取消",
        "Test Model": "測試模型",
        "Test": "測試",
        "Download Models": "下載模型",
        "Test Connection": "測試連接",
        "Basic Settings": "基本設定",
        "Advanced Settings": "進階設定",
        "Repeat Penalty:": "重複懲罰:",

        # Model download dialog
        "Model Manager": "模型管理器",
        "Recommended Models": "推薦模型",
        "Refresh": "重新整理",
        "Size:": "大小:",
        "Languages:": "語言:",
        "Installed": "已安裝",
        "Use This Model": "使用此模型",
        "Delete": "刪除",
        "Downloading...": "正在下載...",
        "Not installed": "未安裝",
        "Download": "下載",

        # Messages
        "Loading": "正在載入",
        "Ready": "就緒",
        "No model loaded": "未載入模型",
        "Load failed": "載入失敗",
        "Previous conversation restored": "已還原上次對話",
        "Context enabled for selected text": "已為選取文字啟用上下文",
        "Context enabled for current file": "已為目前檔案啟用上下文",
        "Context enabled but no file is currently open": "上下文已啟用但目前沒有開啟的檔案",
        "Context disabled": "上下文已停用",
        "[Generation stopped by user]": "[使用者已停止生成]",
        "Error": "錯誤",
        "Success": "成功",
        "Failed to load model:": "載入模型失敗:",
        "No model found. Please download a model from Settings → Download Models.": "未找到模型。請從設定→下載模型中下載模型。",
        "Model downloaded successfully!": "模型下載成功!",
        "Failed to download": "下載失敗",
        "Please select a valid model file!": "請選擇有效的模型檔案!",
        "Model file found!": "找到模型檔案!",
        "API key is required for {}": "{}需要API金鑰",
        "Testing {} connection...": "正在測試{}連接...",
        "Model file does not exist!": "模型檔案不存在!",
        "LLM Busy": "LLM正在執行",
        "Please wait for the current generation to complete.": "請等待當前生成完成。",
        "Generating...": "正在生成...",
        "Loading...": "正在載入...",
        "Testing...": "正在測試...",
        "No Models": "沒有模型",
        "No models found in Ollama. Please pull a model first using 'ollama pull <model>'": "在Ollama中未找到模型。請先使用 'ollama pull <model>' 下載模型。",
        "Failed to connect to Ollama: {}": "連接到Ollama/LM Studio失敗: {}",
        "Failed to fetch models: {}": "獲取模型失敗: {}",
        "Server:": "伺服器:",
        "Host:": "主機:",
        "Port:": "連接埠:",
        "(File not found)": "(檔案未找到)",
        "Presets:": "預設:",
        "No Editor": "無編輯器",
        "Please open a file in the editor first.": "請先在編輯器中開啟檔案。",
        "Code inserted into editor!": "程式碼已插入編輯器!",
        "Connected to {} API. Ready to chat!": "已連接到 {} API。準備聊天!",
        "LLM model loaded successfully!": "LLM模型載入成功!",
        "tkinterweb is not installed": "未安裝tkinterweb",
        "To enable Markdown rendering and interactive features,\nplease install tkinterweb:\n\npip install tkinterweb": "要啟用Markdown渲染和互動功能,\n請安裝tkinterweb:\n\npip install tkinterweb",

        # Error messages
        "File not found during {}: {}": "{}時未找到檔案: {}",
        "Permission denied during {}: {}": "{}時存取被拒絕: {}",
        "Connection failed during {}: {}": "{}時連接失敗: {}",
        "Operation timed out during {}: {}": "{}時操作逾時: {}",
        "Invalid value during {}: {}": "{}時值無效: {}",
        "Missing dependency during {}: {}": "{}時缺少依賴項: {}",
        "Error during {}: {}": "{}時發生錯誤: {}",
        "Connection failed. Please check your internet connection or server settings.": "連接失敗。請檢查您的網際網路連接或伺服器設定。",
        "Request timed out. Please try again.": "請求逾時。請重試。",
        "API key error. Please check your API key in settings.": "API金鑰錯誤。請在設定中檢查您的API金鑰。",
        "Model error. Please check if the model is properly loaded.": "模型錯誤。請檢查模型是否正確載入。",
        "LLM module not installed. Please install llama-cpp-python.": "未安裝LLM模組。請安裝llama-cpp-python。",
        "Failed to initialize LLM": "LLM初始化失敗",

        # Skill levels
        "beginner": "初學者",
        "intermediate": "中級",
        "advanced": "高級",

        # Custom prompt dialog
        "Edit Custom System Prompt": "編輯自訂系統提示",
        "Enter your custom system prompt. This will be used to instruct the AI on how to respond.": "輸入您的自訂系統提示。這將用於指導AI如何回應。",
        "Tips": "提示",
        "• Use {skill_level} to reference the user's skill level": "• 使用 {skill_level} 引用使用者的技能水準",
        "• Use {language} to reference the output language": "• 使用 {language} 引用輸出語言",
        "• Be specific about the coding style and explanation depth": "• 明確指定程式碼風格和解釋深度",
        "• Include examples of how you want the AI to respond": "• 包含您希望AI如何回應的範例",
        "Educational": "教育用途",
        "Professional": "專業",
        "Minimal": "最小化",
        "Variables: {skill_level} = 'beginner/intermediate/advanced (with detailed description)', {language} = 'ja/en/zh-CN/zh-TW/auto'": "變數: {skill_level} = '初學者/中級/高級(含詳細描述)', {language} = 'ja/en/zh-CN/zh-TW/auto'",
        "Controls randomness: 0.0 = deterministic, 2.0 = very creative": "控制隨機性: 0.0 = 確定性, 2.0 = 非常有創意",
        "Maximum number of tokens the model can process at once": "模型一次可以處理的最大令牌數(記憶大小)。越大記憶體消耗越多,生成速度越慢",
        "Maximum number of tokens the model will generate in one response": "模型在一次回應中生成的最大令牌數",
        "Penalty for repeating tokens: 1.0 = no penalty, 2.0 = strong penalty": "重複令牌的懲罰: 1.0 = 無懲罰, 2.0 = 強懲罰",
    }
}
|
462
|
+
|
463
|
+
|
464
|
+
def get_current_language() -> str:
    """Return the normalized UI language code for translation lookup.

    Reads Thonny's ``general.language`` option and maps it onto the keys of
    ``TRANSLATIONS``: ``"ja"`` for Japanese, ``"zh_TW"`` for Traditional
    Chinese (Taiwan/Hong Kong locales), ``"zh_CN"`` for other Chinese
    locales, and ``"en"`` for everything else or on any failure.

    Returns:
        One of ``"ja"``, ``"zh_CN"``, ``"zh_TW"`` or ``"en"``.
    """
    try:
        # get_option may return None (or the workbench may not exist yet,
        # e.g. outside a running Thonny); treat both as the default locale.
        lang = get_workbench().get_option("general.language", "en_US") or "en_US"

        # Normalize the raw locale string to a supported language code.
        if lang.startswith("ja"):
            return "ja"
        if lang.startswith("zh"):
            return "zh_TW" if ("TW" in lang or "HK" in lang) else "zh_CN"
    except Exception:
        # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed. Any lookup failure falls back to English.
        pass

    # All other languages fall back to English.
    return "en"
|
483
|
+
|
484
|
+
|
485
|
+
def tr(text: str) -> str:
    """Translate *text* into the current UI language.

    Args:
        text: The English source string used as the lookup key.

    Returns:
        The localized string, or *text* unchanged when the current
        language is English or no translation entry exists.
    """
    language = get_current_language()

    # English needs no lookup — the key is already the answer.
    if language == "en":
        return text

    # Chained .get() falls back to the original text when either the
    # language table or the individual entry is missing.
    return TRANSLATIONS.get(language, {}).get(text, text)
|