thonny_codemate-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. thonny_codemate-0.1.0.dist-info/METADATA +307 -0
  2. thonny_codemate-0.1.0.dist-info/RECORD +27 -0
  3. thonny_codemate-0.1.0.dist-info/WHEEL +5 -0
  4. thonny_codemate-0.1.0.dist-info/licenses/LICENSE +21 -0
  5. thonny_codemate-0.1.0.dist-info/top_level.txt +1 -0
  6. thonnycontrib/__init__.py +1 -0
  7. thonnycontrib/thonny_codemate/__init__.py +397 -0
  8. thonnycontrib/thonny_codemate/api.py +154 -0
  9. thonnycontrib/thonny_codemate/context_manager.py +296 -0
  10. thonnycontrib/thonny_codemate/external_providers.py +714 -0
  11. thonnycontrib/thonny_codemate/i18n.py +506 -0
  12. thonnycontrib/thonny_codemate/llm_client.py +841 -0
  13. thonnycontrib/thonny_codemate/message_virtualization.py +136 -0
  14. thonnycontrib/thonny_codemate/model_manager.py +515 -0
  15. thonnycontrib/thonny_codemate/performance_monitor.py +141 -0
  16. thonnycontrib/thonny_codemate/prompts.py +102 -0
  17. thonnycontrib/thonny_codemate/ui/__init__.py +1 -0
  18. thonnycontrib/thonny_codemate/ui/chat_view.py +687 -0
  19. thonnycontrib/thonny_codemate/ui/chat_view_html.py +1299 -0
  20. thonnycontrib/thonny_codemate/ui/custom_prompt_dialog.py +175 -0
  21. thonnycontrib/thonny_codemate/ui/markdown_renderer.py +484 -0
  22. thonnycontrib/thonny_codemate/ui/model_download_dialog.py +355 -0
  23. thonnycontrib/thonny_codemate/ui/settings_dialog.py +1218 -0
  24. thonnycontrib/thonny_codemate/utils/__init__.py +25 -0
  25. thonnycontrib/thonny_codemate/utils/constants.py +138 -0
  26. thonnycontrib/thonny_codemate/utils/error_messages.py +92 -0
  27. thonnycontrib/thonny_codemate/utils/unified_error_handler.py +310 -0
@@ -0,0 +1,1218 @@
+ """
+ Settings dialog with the new design.
+ Options are laid out in order of importance.
+ """
+ import tkinter as tk
+ from tkinter import ttk, filedialog, messagebox
+ from pathlib import Path
+ from typing import Optional
+ import logging
+ import urllib.error  # needed by the URLError handler in _fetch_ollama_models
+
+ from thonny import get_workbench
+ from ..i18n import tr
+
+ logger = logging.getLogger(__name__)
+
+
+ class SettingsDialog(tk.Toplevel):
+     """Settings dialog with the new design."""
+
+     def __init__(self, parent):
+         super().__init__(parent)
+
+         self.title(tr("LLM Assistant Settings"))
+         self.geometry("700x750")  # fixed size
+         self.resizable(False, False)  # disable resizing
+
+         # modal dialog
+         self.transient(parent)
+
+         self.workbench = get_workbench()
+         self.settings_changed = False
+
+         # main container
+         main_container = ttk.Frame(self, padding="10")
+         main_container.pack(fill="both", expand=True)
+
+         # create a scrollable area
+         canvas = tk.Canvas(main_container, highlightthickness=0, width=680)
+         scrollbar = ttk.Scrollbar(main_container, orient="vertical", command=canvas.yview)
+         self.scrollable_frame = ttk.Frame(canvas)
+
+         self.scrollable_frame.bind(
+             "<Configure>",
+             lambda e: canvas.configure(scrollregion=canvas.bbox("all"))
+         )
+
+         # give the embedded window an explicit width when creating it
+         canvas.create_window((0, 0), window=self.scrollable_frame, anchor="nw", width=680)
+         canvas.configure(yscrollcommand=scrollbar.set)
+
+         # layout
+         canvas.pack(side="left", fill="both", expand=True)
+         scrollbar.pack(side="right", fill="y")
+
+         # create the sections
+         self._create_basic_section()
+         self._create_generation_section()
+         self._create_advanced_section()
+
+         # button frame
+         button_frame = ttk.Frame(self)
+         button_frame.pack(fill="x", padx=10, pady=15)  # extra padding
+
+         # configure a button style with a larger font
+         button_style = ttk.Style()
+         button_style.configure("Large.TButton", font=("", 11))
+
+         # left-hand buttons
+         left_buttons = ttk.Frame(button_frame)
+         left_buttons.pack(side="left")
+
+         self.test_connection_button = ttk.Button(
+             left_buttons,
+             text=tr("Test Connection"),
+             command=self._test_connection,
+             width=20,  # explicit width
+             style="Large.TButton"
+         )
+         self.test_connection_button.pack(side="left", ipady=5)  # ipady makes the button taller
+
+         # right-hand buttons
+         right_buttons = ttk.Frame(button_frame)
+         right_buttons.pack(side="right")
+
+         ttk.Button(
+             right_buttons,
+             text=tr("Save"),
+             command=self._save_settings,
+             width=12,  # explicit width
+             style="Large.TButton"
+         ).pack(side="left", padx=(0, 8), ipady=5)  # ipady makes the button taller
+
+         ttk.Button(
+             right_buttons,
+             text=tr("Cancel"),
+             command=self.destroy,
+             width=12,  # explicit width
+             style="Large.TButton"
+         ).pack(side="left", ipady=5)  # ipady makes the button taller
+
+         # load the settings
+         self._load_settings()
+
+         # update the initial state
+         self._on_provider_changed()
+
+         # mark initialization as complete
+         self._initialization_complete = True
+
+     def _create_basic_section(self):
+         """Basic settings section."""
+         # basic settings (always expanded)
+         basic_frame = ttk.LabelFrame(
+             self.scrollable_frame,
+             text=tr("Basic Settings"),
+             padding="10",
+             width=650
+         )
+         basic_frame.pack(fill="x", padx=5, pady=5)
+
+         # grid weights (make column 1 expandable)
+         basic_frame.grid_columnconfigure(1, weight=1)
+
+         # Provider
+         ttk.Label(basic_frame, text=tr("Provider:")).grid(row=0, column=0, sticky="w", pady=5)
+         self.provider_var = tk.StringVar(value="local")
+         self.provider_combo = ttk.Combobox(
+             basic_frame,
+             textvariable=self.provider_var,
+             values=["local", "chatgpt", "ollama/lmstudio", "openrouter"],
+             state="readonly"
+         )
+         self.provider_combo.grid(row=0, column=1, sticky="ew", pady=5)
+         self.provider_combo.bind("<<ComboboxSelected>>", self._on_provider_changed)
+
+         # Model/API key (switched dynamically)
+         # fixed-size container frame
+         self.model_container = ttk.Frame(basic_frame)
+         self.model_container.grid(row=1, column=0, columnspan=3, sticky="ew", pady=5)
+
+         # actual content frame
+         self.model_frame = ttk.Frame(self.model_container)
+         self.model_frame.pack(fill="both", expand=True)
+
+         # for local models
+         self.local_model_frame = ttk.Frame(self.model_frame)
+
+         # path entry at the top
+         path_frame = ttk.Frame(self.local_model_frame)
+         path_frame.pack(fill="x")
+
+         ttk.Label(path_frame, text=tr("Model:")).pack(side="left", padx=(0, 10))
+         self.model_path_var = tk.StringVar()
+         self.model_path_entry = ttk.Entry(
+             path_frame,
+             textvariable=self.model_path_var
+         )
+         self.model_path_entry.pack(side="left", fill="x", expand=True, padx=(0, 5))
+
+         # tooltip for the entry field
+         self._create_tooltip(self.model_path_entry, dynamic=True)
+         ttk.Button(
+             path_frame,
+             text=tr("Browse..."),
+             command=self._browse_model,
+             width=12  # explicit width
+         ).pack(side="left")
+
+         # file-name label below the entry
+         self.model_filename_label = ttk.Label(
+             self.local_model_frame,
+             text="",
+             foreground="gray",
+             font=("", 9)
+         )
+         self.model_filename_label.pack(anchor="w", padx=(60, 0), pady=(2, 0))
+
+         # refresh the file name whenever the path changes
+         self.model_path_var.trace_add("write", self._update_model_filename_label)
+
+         # model download button (local models)
+         download_frame = ttk.Frame(self.local_model_frame)
+         download_frame.pack(fill="x", pady=(5, 0))
+
+         self.download_models_button = ttk.Button(
+             download_frame,
+             text=tr("Download Models"),
+             command=self._show_model_manager,
+             width=15
+         )
+         self.download_models_button.pack(side="left", padx=(60, 0))
+
+         # for external APIs
+         self.api_frame = ttk.Frame(self.model_frame)
+
+         # API key
+         self.api_key_frame = ttk.Frame(self.api_frame)
+         ttk.Label(self.api_key_frame, text=tr("API Key:")).pack(side="left", padx=(0, 10))
+         self.api_key_var = tk.StringVar()
+         self.api_key_entry = ttk.Entry(
+             self.api_key_frame,
+             textvariable=self.api_key_var,
+             show="*"
+         )
+         self.api_key_entry.pack(side="left", fill="x", expand=True)
+
+         # Ollama/LM Studio server settings (moved into Basic Settings)
+         self.ollama_server_frame = ttk.Frame(self.api_frame)
+         ttk.Label(self.ollama_server_frame, text=tr("Server:")).pack(side="left", padx=(0, 10))
+
+         # IP/host
+         ttk.Label(self.ollama_server_frame, text=tr("Host:")).pack(side="left", padx=(0, 5))
+         self.ollama_host_var = tk.StringVar(value="localhost")
+         self.ollama_host_entry = ttk.Entry(
+             self.ollama_server_frame,
+             textvariable=self.ollama_host_var
+         )
+         self.ollama_host_entry.pack(side="left", fill="x", expand=True, padx=(0, 10))
+
+         # port (defaults to 11434; LM Studio uses 1234)
+         ttk.Label(self.ollama_server_frame, text=tr("Port:")).pack(side="left", padx=(0, 5))
+         self.ollama_port_var = tk.StringVar(value="11434")
+         self.ollama_port_entry = ttk.Entry(
+             self.ollama_server_frame,
+             textvariable=self.ollama_port_var,
+             width=8
+         )
+         self.ollama_port_entry.pack(side="left")
+
+         # quick preset buttons
+         preset_frame = ttk.Frame(self.ollama_server_frame)
+         preset_frame.pack(side="left", padx=(10, 0))
+
+         ttk.Label(preset_frame, text=tr("Presets:")).pack(side="left", padx=(0, 5))
+
+         # Ollama preset button
+         ttk.Button(
+             preset_frame,
+             text="Ollama",
+             command=lambda: self._set_ollama_defaults(),
+             width=10
+         ).pack(side="left", padx=(0, 5))
+
+         # LM Studio preset button
+         ttk.Button(
+             preset_frame,
+             text="LM Studio",
+             command=lambda: self._set_lmstudio_defaults(),
+             width=10
+         ).pack(side="left")
+
+         # rebuild the base URL whenever host/port changes
+         self.ollama_host_var.trace_add("write", self._update_base_url_from_host_port)
+         self.ollama_port_var.trace_add("write", self._update_base_url_from_host_port)
+
+         # model name (external APIs)
+         self.model_name_frame = ttk.Frame(self.api_frame)
+         ttk.Label(self.model_name_frame, text=tr("Model Name:")).pack(side="left", padx=(0, 10))
+         self.external_model_var = tk.StringVar()
+
+         # combobox for ChatGPT/OpenRouter
+         self.external_model_combo = ttk.Combobox(
+             self.model_name_frame,
+             textvariable=self.external_model_var,
+             state="readonly"
+         )
+
+         # entry for Ollama (deprecated; kept for compatibility)
+         self.external_model_entry = ttk.Entry(
+             self.model_name_frame,
+             textvariable=self.external_model_var
+         )
+
+         # refresh button for Ollama
+         self.refresh_ollama_button = ttk.Button(
+             self.model_name_frame,
+             text=tr("Refresh"),
+             command=self._fetch_ollama_models,
+             width=10
+         )
+
+         # Language
+         ttk.Label(basic_frame, text=tr("Language:")).grid(row=2, column=0, sticky="w", pady=5)
+         self.output_language_var = tk.StringVar(value="auto")
+         self.language_combo = ttk.Combobox(
+             basic_frame,
+             textvariable=self.output_language_var,
+             values=["auto", "ja", "en", "zh-CN", "zh-TW"],
+             state="readonly"
+         )
+         self.language_combo.grid(row=2, column=1, sticky="ew", pady=5)
+
+         # display name of the selected language
+         self.language_label = ttk.Label(basic_frame, text="", foreground="gray")
+         self.language_label.grid(row=2, column=2, padx=(10, 0), pady=5)
+         self.language_combo.bind("<<ComboboxSelected>>", self._update_language_label)
+
+         # Skill level
+         ttk.Label(basic_frame, text=tr("Skill Level:")).grid(row=3, column=0, sticky="w", pady=5)
+         self.skill_level_var = tk.StringVar(value="beginner")
+         self.skill_combo = ttk.Combobox(
+             basic_frame,
+             textvariable=self.skill_level_var,
+             values=["beginner", "intermediate", "advanced"],
+             state="readonly"
+         )
+         self.skill_combo.grid(row=3, column=1, sticky="ew", pady=5)
+
+         # display name of the selected skill level
+         self.skill_label = ttk.Label(basic_frame, text="", foreground="gray")
+         self.skill_label.grid(row=3, column=2, padx=(10, 0), pady=5)
+         self.skill_combo.bind("<<ComboboxSelected>>", self._update_skill_label)
+
+         # grid configuration
+         basic_frame.columnconfigure(1, weight=1)
+
+     def _create_generation_section(self):
+         """Generation settings section."""
+         generation_frame = ttk.LabelFrame(
+             self.scrollable_frame,
+             text=tr("Generation Settings"),
+             padding="10",
+             width=650
+         )
+         generation_frame.pack(fill="x", padx=5, pady=5)
+
+         gen_frame = generation_frame
+
+         # Temperature
+         temp_frame = ttk.Frame(gen_frame)
+         temp_frame.pack(fill="x", pady=5)
+
+         # spacer so the widgets in the Temperature row stay evenly spaced
+         temp_spacer_frame = ttk.Frame(temp_frame)
+         temp_spacer_frame.pack(side="right", padx=(10, 0))
+
+         temp_label = ttk.Label(temp_frame, text=tr("Temperature:"))
+         temp_label.pack(side="left", padx=(0, 10))
+
+         # tooltip explaining Temperature
+         temp_help = ttk.Label(temp_frame, text="(?)", foreground="blue", cursor="hand2")
+         temp_help.pack(side="left", padx=(0, 10))
+         self._create_tooltip(temp_help, tr("Controls randomness: 0.0 = deterministic, 2.0 = very creative"))
+         self.temperature_var = tk.DoubleVar()
+         temp_scale = ttk.Scale(
+             temp_frame,
+             from_=0.0,
+             to=2.0,
+             orient=tk.HORIZONTAL,
+             variable=self.temperature_var
+         )
+         temp_scale.pack(side="left", fill="x", expand=True, padx=(0, 10))
+
+         self.temp_label = ttk.Label(temp_frame, text="0.7")
+         self.temp_label.pack(side="left")
+
+         # ttk.Scale reports raw floats, so the command callback formats the label
+         def update_temp_label(value):
+             self.temp_label.config(text=f"{float(value):.1f}")
+         temp_scale.config(command=update_temp_label)
+
+         # Max tokens
+         tokens_frame = ttk.Frame(gen_frame)
+         tokens_frame.pack(fill="x", pady=5)
+
+         ttk.Label(tokens_frame, text=tr("Max Tokens:")).pack(side="left", padx=(0, 10))
+
+         # tooltip explaining Max Tokens
+         tokens_help = ttk.Label(tokens_frame, text="(?)", foreground="blue", cursor="hand2")
+         tokens_help.pack(side="left", padx=(0, 10))
+         self._create_tooltip(tokens_help, tr("Maximum number of tokens the model will generate in one response"))
+
+         self.max_tokens_var = tk.IntVar()
+         tokens_spinbox = ttk.Spinbox(
+             tokens_frame,
+             from_=128,
+             to=4096,
+             increment=128,
+             textvariable=self.max_tokens_var,
+             width=10
+         )
+         tokens_spinbox.pack(side="left")
+
+         # Context size
+         context_frame = ttk.Frame(gen_frame)
+         context_frame.pack(fill="x", pady=5)
+
+         context_label = ttk.Label(context_frame, text=tr("Context Size:"))
+         context_label.pack(side="left", padx=(0, 10))
+
+         # tooltip explaining Context Size
+         context_help = ttk.Label(context_frame, text="(?)", foreground="blue", cursor="hand2")
+         context_help.pack(side="left", padx=(0, 10))
+         self._create_tooltip(context_help, tr("Maximum number of tokens the model can process at once"))
+         self.context_size_var = tk.IntVar()
+         context_spinbox = ttk.Spinbox(
+             context_frame,
+             from_=512,
+             to=32768,
+             increment=512,
+             textvariable=self.context_size_var,
+             width=10
+         )
+         context_spinbox.pack(side="left")
+
+         # button to auto-detect the context size
+         auto_context_button = ttk.Button(
+             context_frame,
+             text=tr("Auto"),
+             command=self._auto_set_context_size,
+             width=8
+         )
+         auto_context_button.pack(side="left", padx=(10, 0))
+
+         # Repeat penalty
+         repeat_frame = ttk.Frame(gen_frame)
+         repeat_frame.pack(fill="x", pady=5)
+
+         # spacer so the widgets in the Repeat Penalty row stay evenly spaced
+         repeat_spacer_frame = ttk.Frame(repeat_frame)
+         repeat_spacer_frame.pack(side="right", padx=(10, 0))
+
+         repeat_label = ttk.Label(repeat_frame, text=tr("Repeat Penalty:"))
+         repeat_label.pack(side="left", padx=(0, 10))
+
+         # tooltip explaining Repeat Penalty
+         repeat_help = ttk.Label(repeat_frame, text="(?)", foreground="blue", cursor="hand2")
+         repeat_help.pack(side="left", padx=(0, 10))
+         self._create_tooltip(repeat_help, tr("Penalty for repeating tokens: 1.0 = no penalty, 2.0 = strong penalty"))
+         self.repeat_penalty_var = tk.DoubleVar()
+         repeat_scale = ttk.Scale(
+             repeat_frame,
+             from_=1.0,
+             to=2.0,
+             orient=tk.HORIZONTAL,
+             variable=self.repeat_penalty_var
+         )
+         repeat_scale.pack(side="left", fill="x", expand=True, padx=(0, 10))
+
+         self.repeat_label = ttk.Label(repeat_frame, text="1.1")
+         self.repeat_label.pack(side="left")
+
+         def update_repeat_label(value):
+             self.repeat_label.config(text=f"{float(value):.2f}")
+         repeat_scale.config(command=update_repeat_label)
+
+     def _create_advanced_section(self):
+         """Advanced settings section."""
+         advanced_frame = ttk.LabelFrame(
+             self.scrollable_frame,
+             text=tr("Advanced Settings"),
+             padding="10",
+             width=650
+         )
+         advanced_frame.pack(fill="x", padx=5, pady=5)
+
+         adv_frame = advanced_frame
+
+         # base URL (internal only, not shown in the UI)
+         self.base_url_var = tk.StringVar(value="http://localhost:11434")
+
+         # no change handler for the base URL; the user presses Refresh manually
+
+         # System prompt type
+         prompt_frame = ttk.Frame(adv_frame)
+         prompt_frame.pack(fill="x", pady=5)
+
+         ttk.Label(prompt_frame, text=tr("System Prompt:")).pack(side="left", padx=(0, 10))
+         self.prompt_type_var = tk.StringVar(value="default")
+
+         ttk.Radiobutton(
+             prompt_frame,
+             text=tr("Default"),
+             variable=self.prompt_type_var,
+             value="default"
+         ).pack(side="left", padx=(0, 10))
+
+         ttk.Radiobutton(
+             prompt_frame,
+             text=tr("Custom"),
+             variable=self.prompt_type_var,
+             value="custom"
+         ).pack(side="left", padx=(0, 10))
+
+         ttk.Button(
+             prompt_frame,
+             text=tr("Edit Custom Prompt"),
+             command=self._edit_custom_prompt,
+             width=20  # explicit width
+         ).pack(side="left")
+
+     def _on_provider_changed(self, event=None):
+         """Handle a provider change."""
+         provider = self.provider_var.get()
+
+         # treat "ollama/lmstudio" as "ollama"
+         if provider == "ollama/lmstudio":
+             provider = "ollama"
+
+         # load the API key that matches the new provider
+         if provider == "chatgpt":
+             self.api_key_var.set(self.workbench.get_option("llm.chatgpt_api_key", ""))
+         elif provider == "openrouter":
+             self.api_key_var.set(self.workbench.get_option("llm.openrouter_api_key", ""))
+         else:
+             self.api_key_var.set("")
+
+         # hide all frames first
+         self.local_model_frame.pack_forget()
+         self.api_frame.pack_forget()
+         self.api_key_frame.pack_forget()
+         self.model_name_frame.pack_forget()
+         self.refresh_ollama_button.pack_forget()
+         self.ollama_server_frame.pack_forget()
+
+         if provider == "local":
+             # local model
+             self.local_model_frame.pack(fill="x")
+         else:
+             # external API
+             self.api_frame.pack(fill="x")
+
+             if provider in ["chatgpt", "openrouter"]:
+                 self.api_key_frame.pack(fill="x", pady=2)
+                 self.model_name_frame.pack(fill="x", pady=2)
+
+                 # show the combobox
+                 self.external_model_entry.pack_forget()
+                 self.external_model_combo.pack(side="left", fill="x", expand=True)
+
+                 # update the model list
+                 from ..utils.constants import ProviderConstants
+                 models = ProviderConstants.PROVIDER_MODELS.get(provider, [])
+
+                 self.external_model_combo['values'] = models
+                 if models and self.external_model_var.get() not in models:
+                     self.external_model_var.set(models[0])
+
+             elif provider == "ollama":
+                 # show the server settings
+                 self.ollama_server_frame.pack(fill="both", expand=True, pady=2)
+                 self.model_name_frame.pack(fill="x", pady=2)
+
+                 # Ollama also uses the combobox
+                 self.external_model_entry.pack_forget()
+                 self.external_model_combo.pack(side="left", fill="x", expand=True)
+
+                 # show the refresh button
+                 self.refresh_ollama_button.pack(side="left", padx=(5, 0))
+
+                 # the user presses Refresh manually the first time
+                 self.external_model_combo['values'] = []
+                 self.external_model_var.set("")
+
+     def _update_language_label(self, event=None):
+         """Update the language label."""
+         lang_names = {
+             "auto": tr("Auto (Follow Thonny)"),
+             "ja": "日本語",
+             "en": "English",
+             "zh-CN": "中文(简体)",
+             "zh-TW": "中文(繁體)"
+         }
+         lang = self.output_language_var.get()
+         self.language_label.config(text=lang_names.get(lang, ""))
+
+     def _update_skill_label(self, event=None):
+         """Update the skill-level label."""
+         skill_names = {
+             "beginner": tr("beginner"),
+             "intermediate": tr("intermediate"),
+             "advanced": tr("advanced")
+         }
+         skill = self.skill_level_var.get()
+         self.skill_label.config(text=skill_names.get(skill, ""))
+
+     def _browse_model(self):
+         """Let the user pick a model file."""
+         from ..model_manager import ModelManager
+         model_manager = ModelManager()
+         models_dir = model_manager.get_models_dir()
+         initial_dir = str(models_dir) if models_dir.exists() else str(Path.home())
+
+         filename = filedialog.askopenfilename(
+             title="Select GGUF Model File",
+             filetypes=[("GGUF files", "*.gguf"), ("All files", "*.*")],
+             initialdir=initial_dir
+         )
+         if filename:
+             self.model_path_var.set(filename)
+
+     def _show_model_manager(self):
+         """Show the model manager."""
+         from .model_download_dialog import ModelDownloadDialog
+         dialog = ModelDownloadDialog(self)
+         self.wait_window(dialog)
+
+         # if a model was selected
+         if hasattr(dialog, 'selected_model_path') and dialog.selected_model_path:
+             self.model_path_var.set(dialog.selected_model_path)
+             self.provider_var.set("local")
+             self._on_provider_changed()
+
+     def _test_connection(self):
+         """Test the connection."""
+         provider = self.provider_var.get()
+
+         # treat "ollama/lmstudio" as "ollama"
+         if provider == "ollama/lmstudio":
+             provider = "ollama"
+
+         if provider == "local":
+             model_path = self.model_path_var.get()
+             if not model_path or not Path(model_path).exists():
+                 messagebox.showerror(tr("Error"), tr("Please select a valid model file!"))
+             else:
+                 messagebox.showinfo(tr("Success"), tr("Model file found!"))
+         else:
+             # test the external API
+             if provider in ["chatgpt", "openrouter"] and not self.api_key_var.get():
+                 messagebox.showerror(tr("Error"), tr("API key is required for {}").format(provider))
+                 return
+
+             # run the actual API connection test
+             self.test_connection_button.config(state="disabled", text=tr("Testing..."))
+
+             def test_api():
+                 try:
+                     if provider == "ollama":
+                         from ..external_providers import OllamaProvider
+                         api_provider = OllamaProvider(
+                             base_url=self.base_url_var.get(),
+                             model=self.external_model_var.get()
+                         )
+                     elif provider == "chatgpt":
+                         from ..external_providers import ChatGPTProvider
+                         api_provider = ChatGPTProvider(
+                             api_key=self.api_key_var.get(),
+                             model=self.external_model_var.get()
+                         )
+                     elif provider == "openrouter":
+                         from ..external_providers import OpenRouterProvider
+                         api_provider = OpenRouterProvider(
+                             api_key=self.api_key_var.get(),
+                             model=self.external_model_var.get()
+                         )
+
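+                     # Tkinter widgets are not thread-safe: this worker thread
+                     # hands every result back to the Tk main loop through
+                     # self.after(0, ...) instead of touching the UI directly.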
+                     result = api_provider.test_connection()
+                     self.after(0, lambda: self._show_test_result(result))
+
+                 except Exception as e:
+                     import traceback
+                     error_details = traceback.format_exc()
+                     logger.error(f"Test connection error: {e}\n{error_details}")
+
+                     # user-friendly error message
+                     from ..utils.error_messages import format_api_error
+                     user_error = format_api_error(provider, e)
+
+                     self.after(0, lambda: self._show_test_result({
+                         "success": False,
+                         "provider": provider,
+                         "error": user_error
+                     }))
+
+             import threading
+             thread = threading.Thread(target=test_api, daemon=True)
+             thread.start()
+
+     def _show_test_result(self, result: dict):
+         """Show the connection-test result."""
+         self.test_connection_button.config(state="normal", text=tr("Test Connection"))
+
+         if result["success"]:
+             # provider name for display
+             display_provider = result["provider"]
+             if display_provider == "Ollama":
+                 display_provider = "Ollama/LM Studio"
+
+             if result["provider"] == "Ollama":
+                 models = result.get("available_models", [])
+                 model_info = f"\nModels: {len(models)}" if models else "\nNo models found"
+                 messagebox.showinfo(
+                     tr("Success"),
+                     f"Connected to {display_provider} successfully!{model_info}"
+                 )
+             else:
+                 messagebox.showinfo(
+                     tr("Success"),
+                     f"Connected to {display_provider} successfully!"
+                 )
+         else:
+             # provider name for display
+             display_provider = result.get("provider", "Unknown")
+             if display_provider == "Ollama":
+                 display_provider = "Ollama/LM Studio"
+
+             messagebox.showerror(
+                 tr("Error"),
+                 f"Failed to connect to {display_provider}: {result.get('error', 'Unknown error')}"
+             )
+
+     def _edit_custom_prompt(self):
+         """Edit the custom prompt."""
+         from .custom_prompt_dialog import CustomPromptDialog
+         current_prompt = self.workbench.get_option("llm.custom_prompt", "")
+         dialog = CustomPromptDialog(self, current_prompt)
+         self.wait_window(dialog)
+
+         if hasattr(dialog, 'result'):
+             self.custom_prompt = dialog.result
+             self.prompt_type_var.set("custom")
+
+     def _load_settings(self):
+         """Load the settings."""
+         # basic settings
+         provider = self.workbench.get_option("llm.provider", "local")
+         # display "ollama" as "ollama/lmstudio"
+         if provider == "ollama":
+             provider = "ollama/lmstudio"
+         self.provider_var.set(provider)
+         self.model_path_var.set(self.workbench.get_option("llm.model_path", ""))
+         self.output_language_var.set(self.workbench.get_option("llm.output_language", "auto"))
+         self.skill_level_var.set(self.workbench.get_option("llm.skill_level", "beginner"))
+
+         # generation settings
+         self.temperature_var.set(self.workbench.get_option("llm.temperature", 0.3))
+         self.max_tokens_var.set(self.workbench.get_option("llm.max_tokens", 2048))
+         self.context_size_var.set(self.workbench.get_option("llm.context_size", 4096))
+         self.repeat_penalty_var.set(self.workbench.get_option("llm.repeat_penalty", 1.1))
+
+         # advanced settings
+         # load the API key that matches the provider
+         provider = self.provider_var.get()
+         from ..utils.constants import ProviderConstants
+         api_key_option = ProviderConstants.API_KEY_OPTIONS.get(provider)
+         if api_key_option:
+             self.api_key_var.set(self.workbench.get_option(api_key_option, ""))
+         else:
+             self.api_key_var.set("")
+
+         base_url = self.workbench.get_option("llm.base_url", "http://localhost:11434")
+         self.base_url_var.set(base_url)
+
+         # extract host/port from the base URL (parsed safely with urllib.parse)
+         try:
+             from urllib.parse import urlparse
+
+             parsed = urlparse(base_url)
+
+             # hostname
+             host = parsed.hostname or "localhost"
+             self.ollama_host_var.set(host)
+
+             # port number
+             if parsed.port:
+                 self.ollama_port_var.set(str(parsed.port))
+             else:
+                 # default port
+                 if base_url.endswith(":1234") or ":1234/" in base_url:
+                     self.ollama_port_var.set("1234")
+                 else:
+                     self.ollama_port_var.set("11434")
+
+         except Exception as e:
+             import traceback
+             logger.error(f"Error parsing base URL: {e}\n{traceback.format_exc()}")
+             logger.error(f"Failed to parse URL: {base_url}")
+             # fallback values
+             self.ollama_host_var.set("localhost")
+             self.ollama_port_var.set("11434")
+
+         self.external_model_var.set(self.workbench.get_option("llm.external_model", "gpt-4o-mini"))
+         self.prompt_type_var.set(self.workbench.get_option("llm.prompt_type", "default"))
+
+         # custom prompt
+         self.custom_prompt = self.workbench.get_option("llm.custom_prompt", "")
+
+         # refresh the UI
+         self._update_language_label()
+         self._update_skill_label()
+         self._update_model_filename_label()
+         self.temp_label.config(text=f"{self.temperature_var.get():.1f}")
+         self.repeat_label.config(text=f"{self.repeat_penalty_var.get():.2f}")
+
+     def _save_settings(self):
+         """Save the settings."""
+         # validation
+         provider = self.provider_var.get()
+
+         # persist "ollama/lmstudio" as "ollama"
+         save_provider = provider
+         if provider == "ollama/lmstudio":
+             save_provider = "ollama"
+
+         if provider == "local":
+             model_path = self.model_path_var.get()
+             if model_path and not Path(model_path).exists():
+                 messagebox.showerror(tr("Error"), tr("Model file does not exist!"))
+                 return
+         else:
+             # validate external providers
+             if provider in ["chatgpt", "openrouter"] and not self.api_key_var.get():
+                 messagebox.showerror(tr("Error"), tr("API key is required for {}").format(provider))
+                 return
+
+         # save
+         self.workbench.set_option("llm.provider", save_provider)
+         self.workbench.set_option("llm.model_path", self.model_path_var.get())
+         self.workbench.set_option("llm.output_language", self.output_language_var.get())
+         self.workbench.set_option("llm.skill_level", self.skill_level_var.get())
+
+         self.workbench.set_option("llm.temperature", self.temperature_var.get())
+         self.workbench.set_option("llm.max_tokens", self.max_tokens_var.get())
+         self.workbench.set_option("llm.context_size", self.context_size_var.get())
+         self.workbench.set_option("llm.repeat_penalty", self.repeat_penalty_var.get())
+
+         # save the API key that matches the provider
+         from ..utils.constants import ProviderConstants
+         api_key_option = ProviderConstants.API_KEY_OPTIONS.get(provider)
+         if api_key_option:
+             self.workbench.set_option(api_key_option, self.api_key_var.get())
+
+         self.workbench.set_option("llm.base_url", self.base_url_var.get())
+         self.workbench.set_option("llm.external_model", self.external_model_var.get())
+         self.workbench.set_option("llm.prompt_type", self.prompt_type_var.get())
+
+         if hasattr(self, 'custom_prompt'):
+             self.workbench.set_option("llm.custom_prompt", self.custom_prompt)
+
+         self.settings_changed = True
+         self.destroy()
+
+     def _fetch_ollama_models(self):
+         """Fetch the model list from Ollama."""
+         try:
+             # remember the current selection
+             current_model = self.external_model_var.get()
+
+             # disable the button
+             self.refresh_ollama_button.config(state="disabled", text=tr("Loading..."))
+
+             # fetch the models via OllamaProvider
+             from ..external_providers import OllamaProvider
+             base_url = self.base_url_var.get()
+
+             # fetch in the background
+             def fetch_models():
+                 try:
+                     provider = OllamaProvider(base_url=base_url)
+                     models = provider.get_models()
+
+                     # update on the UI thread
+                     self.after(0, lambda: self._update_ollama_models(models, current_model))
+                 except urllib.error.URLError as e:
+                     import traceback
+                     logger.error(f"Failed to connect to Ollama: {e}\n{traceback.format_exc()}")
+                     error_msg = tr("Cannot connect to server. Please check if Ollama/LM Studio is running.")
+                     self.after(0, lambda: self._update_ollama_models([], current_model, error=error_msg))
+                 except Exception as e:
+                     import traceback
+                     logger.error(f"Failed to fetch Ollama models: {e}\n{traceback.format_exc()}")
+                     self.after(0, lambda: self._update_ollama_models([], current_model, error=str(e)))
+
+             import threading
+             thread = threading.Thread(target=fetch_models, daemon=True)
+             thread.start()
+
+         except Exception as e:
+             import traceback
+             logger.error(f"Error in _fetch_ollama_models: {e}\n{traceback.format_exc()}")
+             messagebox.showerror(tr("Error"), tr("Failed to fetch models: {}").format(str(e)))
+             self.refresh_ollama_button.config(state="normal", text=tr("Refresh"))
+
+     def _update_ollama_models(self, models: list, current_model: str, error: Optional[str] = None):
+         """Update the Ollama model list."""
+         try:
+             # re-enable the button
+             self.refresh_ollama_button.config(state="normal", text=tr("Refresh"))
+
+             if error:
+                 # show the error unless we are still initializing
+                 if hasattr(self, '_initialization_complete'):
+                     messagebox.showerror(tr("Error"), tr("Failed to connect to Ollama: {}").format(error))
+                 else:
+                     logger.warning(f"Failed to connect to Ollama during initialization: {error}")
+                 self.external_model_combo['values'] = []
+                 return
+
+             if not models:
+                 # no models available
+                 if hasattr(self, '_initialization_complete'):
+                     messagebox.showwarning(
+                         tr("No Models"),
+                         tr("No models found in Ollama. Please pull a model first using 'ollama pull <model>'")
+                     )
+                 self.external_model_combo['values'] = []
+                 return
+
+             # update the model list
+             self.external_model_combo['values'] = models
+
+             # keep the selection if the current model is still in the list
+             if current_model in models:
+                 self.external_model_var.set(current_model)
+             else:
+                 # otherwise select the first model
+                 self.external_model_var.set(models[0])
+
+         except Exception as e:
+             logger.error(f"Error updating Ollama models: {e}")
+
+     def _set_ollama_defaults(self):
+         """Apply the Ollama defaults."""
+         self.ollama_host_var.set("localhost")
+         self.ollama_port_var.set("11434")
+         # re-fetch the model list
+         self._fetch_ollama_models()
+
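+     # LM Studio's local server listens on port 1234 by default, while Ollama
+     # uses 11434; both servers are reached through the same OllamaProvider.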
+     def _set_lmstudio_defaults(self):
+         """Apply the LM Studio defaults."""
+         self.ollama_host_var.set("localhost")
+         self.ollama_port_var.set("1234")
+         # re-fetch the model list
+         self._fetch_ollama_models()
+
+     def _update_model_filename_label(self, *args):
+         """Update the model file-name label."""
+         try:
+             path = self.model_path_var.get().strip()
+             if path:
+                 # extract the file name from the path
+                 filename = Path(path).name
+                 # also show the file size (when the file exists)
+                 if Path(path).exists():
+                     size_bytes = Path(path).stat().st_size
+                     # convert the size to a human-readable form
+                     if size_bytes < 1024:
+                         size_str = f"{size_bytes} B"
+                     elif size_bytes < 1024 * 1024:
+                         size_str = f"{size_bytes / 1024:.1f} KB"
+                     elif size_bytes < 1024 * 1024 * 1024:
+                         size_str = f"{size_bytes / (1024 * 1024):.1f} MB"
+                     else:
+                         size_str = f"{size_bytes / (1024 * 1024 * 1024):.1f} GB"
+
+                     self.model_filename_label.config(
+                         text=f"📄 {filename} ({size_str})",
+                         foreground="blue"
+                     )
+                 else:
+                     self.model_filename_label.config(
+                         text=f"⚠️ {filename} " + tr("(File not found)"),
+                         foreground="red"
+                     )
+             else:
+                 self.model_filename_label.config(text="", foreground="gray")
+         except Exception as e:
+             import traceback
+             logger.error(f"Error updating filename label: {e}\n{traceback.format_exc()}")
+             logger.error(f"Path that caused error: {self.model_path_var.get()}")
+             self.model_filename_label.config(text="", foreground="gray")
+
+     def _update_base_url_from_host_port(self, *args):
+         """Rebuild the base URL from host/port."""
+         # skip if an update is already in progress (avoids re-entrancy)
+         if hasattr(self, '_updating_base_url') and self._updating_base_url:
+             return
+
+         self._updating_base_url = True
+         try:
+             host = self.ollama_host_var.get().strip()
+             port = self.ollama_port_var.get().strip()
+
+             if host and port:
+                 # build the base URL
+                 new_url = f"http://{host}:{port}"
+                 # update only if the value actually changed
+                 if self.base_url_var.get() != new_url:
+                     self.base_url_var.set(new_url)
+         except Exception as e:
+             import traceback
+             logger.error(f"Error updating base URL: {e}\n{traceback.format_exc()}")
+             logger.error(f"Host: {self.ollama_host_var.get()}, Port: {self.ollama_port_var.get()}")
+         finally:
+             self._updating_base_url = False
+
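+     # NOTE: _on_base_url_changed is no longer bound to a variable trace; base-URL
+     # edits take effect when the user presses Refresh (see _create_advanced_section).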
+     def _on_base_url_changed(self, *args):
+         """Handle a base-URL change."""
+         # only when Ollama/LM Studio is selected
+         if self.provider_var.get() in ["ollama", "ollama/lmstudio"]:
+             # clear the model list when the URL changes
+             self.external_model_combo['values'] = []
+             self.external_model_var.set("")
+
+     def _create_tooltip(self, widget, text=None, dynamic=False):
+         """Create a static or dynamic tooltip.
+
+         Args:
+             widget: the widget to attach the tooltip to
+             text: text of a static tooltip (used when dynamic is False)
+             dynamic: if True, fetch the text from the widget's get() method on hover
+         """
+         def on_enter(event):
+             # decide the tooltip text
+             tooltip_text = None
+             if dynamic and hasattr(widget, 'get'):
+                 tooltip_text = widget.get()
+             else:
+                 tooltip_text = text
+
+             # show the tooltip only when there is text
+             if tooltip_text:
+                 tooltip = tk.Toplevel()
+                 tooltip.wm_overrideredirect(True)
+                 tooltip.wm_geometry(f"+{event.x_root+10}+{event.y_root+10}")
+                 label = ttk.Label(tooltip, text=tooltip_text, relief="solid", borderwidth=1)
+                 label.pack()
+                 widget.tooltip = tooltip
+
+         def on_leave(event):
+             if hasattr(widget, 'tooltip'):
+                 widget.tooltip.destroy()
+                 del widget.tooltip
+
+         widget.bind("<Enter>", on_enter)
+         widget.bind("<Leave>", on_leave)
+
+     # _create_dynamic_tooltip was merged into _create_tooltip and removed
+
+     def _get_model_max_context_size(self, provider: str, model_name: str = "") -> int:
+         """Get the model's maximum context size."""
+         # local models
+         if provider == "local":
+             model_path = self.model_path_var.get()
+             if not model_path:
+                 return 4096  # default value
+
+             # read the metadata directly from the GGUF file
+             try:
+                 from llama_cpp import Llama, llama_model_n_ctx_train
+
+                 # load the model with minimal settings (n_ctx=0 uses the GGUF default)
+                 llm = Llama(model_path=model_path, n_ctx=0, verbose=False)
+
+                 # read the training-time context length
+                 n_ctx_train = llama_model_n_ctx_train(llm.model)
+                 logger.info(f"Found training context length in GGUF: {n_ctx_train}")
+                 return int(n_ctx_train)
+
+             except ImportError:
+                 raise ImportError("llama_cpp not available. Please install or upgrade llama-cpp-python>=0.3.9: pip install 'llama-cpp-python>=0.3.9'")
+             except Exception as e:
+                 raise Exception(f"Error reading GGUF metadata from {Path(model_path).name}: {str(e)}")
+
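+         # n_ctx_train above is the context window the model was trained with,
+         # stored in the GGUF metadata and read through llama-cpp-python's
+         # low-level binding (hence the >=0.3.9 requirement).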
+         # OpenAI API
+         elif provider == "openai":
+             if not model_name:
+                 model_name = self.external_model_var.get()
+
+             # get the model info from the ChatGPT provider
+             try:
+                 from ..external_providers import ChatGPTProvider
+                 chatgpt_provider = ChatGPTProvider(
+                     api_key=self.api_key_var.get(),
+                     model=model_name,
+                     base_url=self.base_url_var.get() if hasattr(self, 'base_url_var') else None
+                 )
+
+                 model_info = chatgpt_provider.get_model_info(model_name)
+                 context_size = model_info.get("context_size")
+
+                 if context_size:
+                     logger.info(f"Found context size from ChatGPT provider: {context_size}")
+                     return int(context_size)
+                 else:
+                     logger.warning(f"Could not get context size from ChatGPT provider: {model_info.get('error', 'Unknown error')}")
+
+             except Exception as e:
+                 logger.warning(f"Error getting context size from ChatGPT provider: {e}")
+
+             return 4096  # default
+
+         # LM Studio / Ollama
+         elif provider in ["ollama", "ollama/lmstudio"]:
+             if not model_name:
+                 model_name = self.external_model_var.get()
+
+             # get the model info from the Ollama/LM Studio API
+             try:
+                 from ..external_providers import OllamaProvider
+                 ollama_provider = OllamaProvider(
+                     base_url=self.base_url_var.get(),
+                     model=model_name
+                 )
+
+                 model_info = ollama_provider.get_model_info(model_name)
+                 context_size = model_info.get("context_size")
+
+                 # adjust the log message based on the server type
+                 server_type = "LM Studio" if ":1234" in self.base_url_var.get() else "Ollama"
+
+                 if context_size:
+                     logger.info(f"Found context size from {server_type} API: {context_size}")
+                     return int(context_size)
+                 else:
+                     logger.warning(f"Could not get context size from {server_type} API: {model_info.get('error', 'Unknown error')}")
+
+             except Exception as e:
+                 logger.warning(f"Error getting context size from Ollama/LM Studio API: {e}")
+
+             # fallback: estimate the context size from the model name
+             model_lower = model_name.lower()
+
+             if "llama" in model_lower:
+                 if "3.2" in model_lower or "3.1" in model_lower:
+                     return 128000
+                 elif "3" in model_lower:
+                     return 8192
+                 else:
+                     return 4096
+             elif "qwen" in model_lower:
+                 if "2.5" in model_lower:
+                     return 32768
+                 else:
+                     return 8192
+             elif "gemma" in model_lower:
+                 return 8192
+             elif "phi" in model_lower:
+                 return 4096
+             elif "codellama" in model_lower:
+                 return 16384
+             elif "mistral" in model_lower:
+                 return 32768
+             elif "mixtral" in model_lower:
+                 return 32768
+             else:
+                 return 4096  # default
+
+         # OpenRouter
+         elif provider == "openrouter":
+             if not model_name:
+                 model_name = self.external_model_var.get()
+
+             # get the model info from the OpenRouter API
+             try:
+                 from ..external_providers import OpenRouterProvider
+                 openrouter_provider = OpenRouterProvider(
+                     api_key=self.api_key_var.get(),
+                     model=model_name
+                 )
+
+                 model_info = openrouter_provider.get_model_info(model_name)
+                 context_size = model_info.get("context_size")
+
+                 if context_size:
+                     logger.info(f"Found context size from OpenRouter API: {context_size}")
+                     return int(context_size)
+                 else:
+                     logger.warning(f"Could not get context size from OpenRouter API: {model_info.get('error', 'Unknown error')}")
+
+             except Exception as e:
+                 logger.warning(f"Error getting context size from OpenRouter API: {e}")
+
+             # fallback: default value
+             return 4096
+
+         return 4096  # default value
+
+     def _auto_set_context_size(self):
+         """Auto-set the context size based on the current settings."""
+         try:
+             provider = self.provider_var.get()
+
+             if provider == "local":
+                 model_path = self.model_path_var.get()
+                 if not model_path:
+                     messagebox.showwarning(
+                         tr("No Model Selected"),
+                         tr("Please select a local model first.")
+                     )
+                     return
+             elif provider in ["openai", "ollama", "ollama/lmstudio"]:
+                 model_name = self.external_model_var.get()
+                 if not model_name:
+                     messagebox.showwarning(
+                         tr("No Model Selected"),
+                         tr("Please select a model first.")
+                     )
+                     return
+
+             # get the maximum context size
+             max_context = self._get_model_max_context_size(provider)
+
+             # update the setting
+             self.context_size_var.set(max_context)
+
+             # notify the user
+             model_info = ""
+             if provider == "local":
+                 model_info = Path(self.model_path_var.get()).name
+             else:
+                 model_info = self.external_model_var.get()
+
+             messagebox.showinfo(
+                 tr("Context Size Updated"),
+                 tr("Context size automatically set to {} tokens for {}").format(f"{max_context:,}", model_info)
+             )
+
+         except Exception as e:
+             logger.error(f"Error in auto context size setting: {e}")
+             messagebox.showerror(
+                 tr("Error"),
+                 tr("Failed to auto-set context size: {}").format(str(e))
+             )