thonny-codemate 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. thonny_codemate-0.1.0.dist-info/METADATA +307 -0
  2. thonny_codemate-0.1.0.dist-info/RECORD +27 -0
  3. thonny_codemate-0.1.0.dist-info/WHEEL +5 -0
  4. thonny_codemate-0.1.0.dist-info/licenses/LICENSE +21 -0
  5. thonny_codemate-0.1.0.dist-info/top_level.txt +1 -0
  6. thonnycontrib/__init__.py +1 -0
  7. thonnycontrib/thonny_codemate/__init__.py +397 -0
  8. thonnycontrib/thonny_codemate/api.py +154 -0
  9. thonnycontrib/thonny_codemate/context_manager.py +296 -0
  10. thonnycontrib/thonny_codemate/external_providers.py +714 -0
  11. thonnycontrib/thonny_codemate/i18n.py +506 -0
  12. thonnycontrib/thonny_codemate/llm_client.py +841 -0
  13. thonnycontrib/thonny_codemate/message_virtualization.py +136 -0
  14. thonnycontrib/thonny_codemate/model_manager.py +515 -0
  15. thonnycontrib/thonny_codemate/performance_monitor.py +141 -0
  16. thonnycontrib/thonny_codemate/prompts.py +102 -0
  17. thonnycontrib/thonny_codemate/ui/__init__.py +1 -0
  18. thonnycontrib/thonny_codemate/ui/chat_view.py +687 -0
  19. thonnycontrib/thonny_codemate/ui/chat_view_html.py +1299 -0
  20. thonnycontrib/thonny_codemate/ui/custom_prompt_dialog.py +175 -0
  21. thonnycontrib/thonny_codemate/ui/markdown_renderer.py +484 -0
  22. thonnycontrib/thonny_codemate/ui/model_download_dialog.py +355 -0
  23. thonnycontrib/thonny_codemate/ui/settings_dialog.py +1218 -0
  24. thonnycontrib/thonny_codemate/utils/__init__.py +25 -0
  25. thonnycontrib/thonny_codemate/utils/constants.py +138 -0
  26. thonnycontrib/thonny_codemate/utils/error_messages.py +92 -0
  27. thonnycontrib/thonny_codemate/utils/unified_error_handler.py +310 -0
thonnycontrib/thonny_codemate/message_virtualization.py
@@ -0,0 +1,136 @@
+ """
+ Performance optimization through message virtualization.
+ Keeps rendering fast even when there is a large number of messages.
+ """
+ from typing import List, Tuple, Optional
+ import math
+
+
+ class MessageVirtualizer:
+     """
+     Manages virtualization of the message list.
+     Renders only the messages that fall within the visible viewport.
+     """
+
+     def __init__(self, viewport_height: int = 600, message_height: int = 80):
+         """
+         Args:
+             viewport_height: Viewport height in pixels
+             message_height: Average height of one message in pixels
+         """
+         self.viewport_height = viewport_height
+         self.message_height = message_height
+         self.scroll_position = 0
+         self.visible_range = 5  # number of extra messages rendered before and after the viewport
+
+     def get_visible_messages(
+         self,
+         messages: List[Tuple[str, str]],
+         force_bottom: bool = False
+     ) -> Tuple[List[Tuple[int, str, str]], int, int]:
+         """
+         Compute the messages that should be displayed.
+
+         Args:
+             messages: Full list of messages
+             force_bottom: Whether to scroll to the bottom
+
+         Returns:
+             (list of messages to display as [(index, sender, text)], start index, end index)
+         """
+         if not messages:
+             return [], 0, 0
+
+         total_messages = len(messages)
+         messages_per_viewport = math.ceil(self.viewport_height / self.message_height)
+
+         if force_bottom:
+             # Show the bottom of the list
+             end_index = total_messages
+             start_index = max(0, end_index - messages_per_viewport - self.visible_range)
+         else:
+             # Compute from the current scroll position
+             first_visible = int(self.scroll_position / self.message_height)
+             start_index = max(0, first_visible - self.visible_range)
+             end_index = min(
+                 total_messages,
+                 first_visible + messages_per_viewport + self.visible_range
+             )
+
+         # Extract the messages to display
+         visible_messages = []
+         for i in range(start_index, end_index):
+             sender, text = messages[i]
+             visible_messages.append((i, sender, text))
+
+         return visible_messages, start_index, end_index
+
+     def update_scroll_position(self, position: int):
+         """Update the scroll position"""
+         self.scroll_position = max(0, position)
+
+     def get_total_height(self, message_count: int) -> int:
+         """Compute the total height of all messages"""
+         return message_count * self.message_height
+
+     def should_virtualize(self, message_count: int) -> bool:
+         """Decide whether virtualization is needed"""
+         # Virtualization is recommended once there are more than 100 messages
+         return message_count > 100
+
+     def get_placeholder_html(self, start_index: int, end_index: int, total_count: int) -> str:
+         """Placeholder HTML standing in for the messages outside the visible range"""
+         before_height = start_index * self.message_height
+         after_height = (total_count - end_index) * self.message_height
+
+         html = ""
+         if before_height > 0:
+             html += f'<div style="height: {before_height}px;"></div>'
+
+         # The actual messages are inserted here
+
+         if after_height > 0:
+             html += f'<div style="height: {after_height}px;"></div>'
+
+         return html
+
+
+ class MessageCache:
+     """Cache of rendered HTML"""
+
+     def __init__(self, max_size: int = 200):
+         self.cache = {}
+         self.max_size = max_size
+         self.access_order = []
+
+     def get(self, key: str) -> Optional[str]:
+         """Fetch HTML from the cache"""
+         if key in self.cache:
+             # Refresh the access order (LRU)
+             self.access_order.remove(key)
+             self.access_order.append(key)
+             return self.cache[key]
+         return None
+
+     def set(self, key: str, html: str):
+         """Store HTML in the cache"""
+         if key in self.cache:
+             self.access_order.remove(key)
+         elif len(self.cache) >= self.max_size:
+             # Evict the least recently used item
+             oldest = self.access_order.pop(0)
+             del self.cache[oldest]
+
+         self.cache[key] = html
+         self.access_order.append(key)
+
+     def clear(self):
+         """Clear the cache"""
+         self.cache.clear()
+         self.access_order.clear()
+
+     def invalidate(self, key: str):
+         """Invalidate a single cache entry"""
+         if key in self.cache:
+             del self.cache[key]
+             self.access_order.remove(key)
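A minimal usage sketch of the two classes above, assuming the module is importable under its published path (the message data and the renderer stand-in are hypothetical, not part of the package):

    from thonnycontrib.thonny_codemate.message_virtualization import MessageVirtualizer, MessageCache

    virtualizer = MessageVirtualizer(viewport_height=600, message_height=80)
    cache = MessageCache(max_size=200)

    # Hypothetical chat history of 500 (sender, text) tuples
    messages = [("user" if i % 2 == 0 else "assistant", f"message {i}") for i in range(500)]

    if virtualizer.should_virtualize(len(messages)):
        virtualizer.update_scroll_position(10_000)  # simulate a scroll event at 10,000 px
        visible, start, end = virtualizer.get_visible_messages(messages)
        spacers = virtualizer.get_placeholder_html(start, end, len(messages))
        for index, sender, text in visible:
            key = f"{index}:{sender}"
            html = cache.get(key)
            if html is None:
                html = f"<div class='{sender}'>{text}</div>"  # stand-in for the real renderer
                cache.set(key, html)

With these numbers, only the roughly 18 messages around the scroll position are rendered, while the spacer divs preserve the scrollbar geometry of the full 500-message list.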
thonnycontrib/thonny_codemate/model_manager.py
@@ -0,0 +1,515 @@
+ """
+ Model management module.
+ Provides downloading and management of the recommended models.
+ """
+ import os
+
+ # Disable huggingface_hub logging via environment variables
+ os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1"
+ os.environ["HF_HUB_DISABLE_IMPLICIT_TOKEN"] = "1"
+
+ import threading
+ from pathlib import Path
+ from typing import Dict, List, Optional, Callable
+ from dataclasses import dataclass
+
+ # Disable logging (works around a problem in the Thonny environment)
+ import logging
+ logging.disable(logging.CRITICAL)
+
+ # Silence huggingface_hub's loggers as well
+ logging.getLogger("huggingface_hub").setLevel(logging.ERROR)
+ logging.getLogger("huggingface_hub.file_download").setLevel(logging.ERROR)
+ logging.getLogger("urllib3").setLevel(logging.ERROR)
+ logging.getLogger("requests").setLevel(logging.ERROR)
+
+ # Attach a NullHandler to each of these loggers
+ for logger_name in ["huggingface_hub", "huggingface_hub.file_download", "urllib3", "requests"]:
+     logger = logging.getLogger(logger_name)
+     logger.handlers = []
+     logger.addHandler(logging.NullHandler())
+
+ # Definitions of the recommended models
+ RECOMMENDED_MODELS = {
+     # Llama 3.2 series (Meta's latest official models)
+     "llama3.2-1b": {
+         "name": "Llama-3.2-1B-Instruct-Q4_K_M.gguf",
+         "repo_id": "bartowski/Llama-3.2-1B-Instruct-GGUF",
+         "filename": "Llama-3.2-1B-Instruct-Q4_K_M.gguf",
+         "size": "0.8GB",
+         "description": "Llama 3.2 1B - the latest lightweight model. Fast and efficient.",
+         "languages": ["en", "multi"]
+     },
+     "llama3.2-3b": {
+         "name": "Llama-3.2-3B-Instruct-Q4_K_M.gguf",
+         "repo_id": "bartowski/Llama-3.2-3B-Instruct-GGUF",
+         "filename": "Llama-3.2-3B-Instruct-Q4_K_M.gguf",
+         "size": "2.0GB",
+         "description": "Llama 3.2 3B - a well-balanced model; good quality at good speed.",
+         "languages": ["en", "multi"]
+     },
+     "llama3.1-8b": {
+         "name": "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+         "repo_id": "bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
+         "filename": "Meta-Llama-3.1-8B-Instruct-Q4_K_M.gguf",
+         "size": "4.9GB",
+         "description": "Llama 3.1 8B - a high-performance model, roughly on par with GPT-3.5.",
+         "languages": ["en", "multi"]
+     },
+     "llama3-elyza-jp-8b": {
+         "name": "Llama-3-ELYZA-JP-8B-q4_k_m.gguf",
+         "repo_id": "elyza/Llama-3-ELYZA-JP-8B-GGUF",
+         "filename": "Llama-3-ELYZA-JP-8B-q4_k_m.gguf",
+         "size": "4.9GB",
+         "description": "Llama 3 ELYZA JP 8B - a high-performance model specialized for Japanese.",
+         "languages": ["ja", "en"]
+     },
+     "qwen2.5-coder-14b": {
+         "name": "Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf",
+         "repo_id": "bartowski/Qwen2.5-Coder-14B-Instruct-GGUF",
+         "filename": "Qwen2.5-Coder-14B-Instruct-Q4_K_M.gguf",
+         "size": "8.8GB",
+         "description": "Qwen2.5 Coder 14B - the latest high-performance model specialized for programming.",
+         "languages": ["en", "zh", "multi", "code"]
+     },
+     # The Gemma 3n models are commented out because llama-cpp-python does not support them yet.
+     # Re-enable them once llama-cpp-python catches up with the latest llama.cpp.
+     # "gemma-3n-e4b": {
+     #     "name": "gemma-3n-E4B-it-Q4_K_M.gguf",
+     #     "repo_id": "tripolskypetr/gemma-3n-e4b-it",
+     #     "filename": "gemma-3n-E4B-it-Q4_K_M.gguf",
+     #     "size": "4.2GB",
+     #     "description": "Gemma 3n E4B - Google DeepMind's latest lightweight model, with multimodal support.",
+     #     "languages": ["en", "multi"]
+     # },
+     # "gemma-3n-e2b": {
+     #     "name": "gemma-3n-E2B-it-Q4_K_M.gguf",
+     #     "repo_id": "tripolskypetr/gemma-3n-e2b-it",
+     #     "filename": "gemma-3n-E2B-it-Q4_K_M.gguf",
+     #     "size": "2.8GB",
+     #     "description": "Gemma 3n E2B - the smaller lightweight variant; ideal for low-resource environments.",
+     #     "languages": ["en", "multi"]
+     # },
+ }
+
+ @dataclass
+ class DownloadProgress:
+     """Download progress information"""
+     model_name: str
+     downloaded: int
+     total: int
+     status: str  # "downloading", "completed", "error"
+     error_message: Optional[str] = None
+     speed: float = 0.0  # bytes per second
+     eta: int = 0  # estimated time remaining in seconds
+
+     @property
+     def percentage(self) -> float:
+         """Download progress as a percentage"""
+         if self.total > 0:
+             return (self.downloaded / self.total) * 100
+         return 0.0
+
+     @property
+     def speed_str(self) -> str:
+         """Human-readable download speed"""
+         if self.speed < 1024:
+             return f"{self.speed:.0f} B/s"
+         elif self.speed < 1024 * 1024:
+             return f"{self.speed / 1024:.1f} KB/s"
+         else:
+             return f"{self.speed / (1024 * 1024):.1f} MB/s"
+
+     @property
+     def eta_str(self) -> str:
+         """Human-readable remaining time"""
+         if self.eta <= 0:
+             return "Calculating..."
+         elif self.eta < 60:
+             return f"{self.eta}s"
+         elif self.eta < 3600:
+             return f"{self.eta // 60}m {self.eta % 60}s"
+         else:
+             hours = self.eta // 3600
+             minutes = (self.eta % 3600) // 60
+             return f"{hours}h {minutes}m"
+
+     @property
+     def size_str(self) -> str:
+         """Human-readable size"""
+         def format_size(size):
+             if size < 1024:
+                 return f"{size} B"
+             elif size < 1024 * 1024:
+                 return f"{size / 1024:.1f} KB"
+             elif size < 1024 * 1024 * 1024:
+                 return f"{size / (1024 * 1024):.1f} MB"
+             else:
+                 return f"{size / (1024 * 1024 * 1024):.2f} GB"
+
+         if self.total > 0:
+             return f"{format_size(self.downloaded)} / {format_size(self.total)}"
+         else:
+             return format_size(self.downloaded)
+
+
+ class ModelManager:
+     """Downloads and manages models"""
+
+     def __init__(self, models_dir: Optional[Path] = None):
+         if models_dir is None:
+             # Default to the project's models directory
+             self.models_dir = Path(__file__).parent.parent.parent / "models"
+         else:
+             self.models_dir = Path(models_dir)
+
+         # Create the models directory
+         self.models_dir.mkdir(parents=True, exist_ok=True)
+
+         # Download state
+         self._downloading = {}
+         self._download_callbacks = {}
+
+     def get_models_dir(self) -> Path:
+         """Return the path of the models directory"""
+         return self.models_dir
+
+     def list_available_models(self) -> List[Dict]:
+         """Return the list of available models"""
+         models = []
+
+         # Add the recommended models
+         for key, model_info in RECOMMENDED_MODELS.items():
+             model_path = self.models_dir / model_info["filename"]
+             model_data = {
+                 "key": key,
+                 "name": model_info["name"],
+                 "description": model_info["description"],
+                 "size": model_info["size"],
+                 "languages": model_info.get("languages", ["en"]),
+                 "path": str(model_path),
+                 "installed": model_path.exists(),
+                 "downloading": key in self._downloading
+             }
+             models.append(model_data)
+
+         # Also add custom models (other GGUF files in the directory)
+         for gguf_file in self.models_dir.glob("*.gguf"):
+             # Only if the file is not one of the recommended models
+             if not any(gguf_file.name == m["filename"] for m in RECOMMENDED_MODELS.values()):
+                 models.append({
+                     "key": f"custom_{gguf_file.stem}",
+                     "name": gguf_file.name,
+                     "description": "Custom model",
+                     "size": f"{gguf_file.stat().st_size / 1024 / 1024 / 1024:.1f}GB",
+                     "path": str(gguf_file),
+                     "installed": True,
+                     "downloading": False
+                 })
+
+         return models
+
+     def get_model_path(self, model_key: str = "llama3.2-1b") -> Optional[str]:
+         """
+         Return the path of the specified model.
+
+         Args:
+             model_key: Model key (e.g. "llama3.2-1b")
+
+         Returns:
+             Path to the model file, if it exists
+         """
+         if model_key in RECOMMENDED_MODELS:
+             model_info = RECOMMENDED_MODELS[model_key]
+             model_path = self.models_dir / model_info["filename"]
+             if model_path.exists():
+                 return str(model_path)
+
+         # Fallback: return any GGUF file found in the directory
+         for gguf_file in self.models_dir.glob("*.gguf"):
+             return str(gguf_file)
+
+         return None
+
+     def download_model(self, model_key: str, progress_callback: Optional[Callable[[DownloadProgress], None]] = None):
+         """
+         Download a model.
+
+         Args:
+             model_key: Key into RECOMMENDED_MODELS
+             progress_callback: Progress callback function
+         """
+         if model_key not in RECOMMENDED_MODELS:
+             raise ValueError(f"Unknown model key: {model_key}")
+
+         if model_key in self._downloading:
+             # Already downloading
+             return
+
+         model_info = RECOMMENDED_MODELS[model_key]
+
+         # Download in a background thread
+         thread = threading.Thread(
+             target=self._download_model_thread,
+             args=(model_key, model_info, progress_callback),
+             daemon=True
+         )
+         thread.start()
+
+     def _download_model_thread(self, model_key: str, model_info: Dict, progress_callback: Optional[Callable]):
+         """Thread function that performs the actual download"""
+         self._downloading[model_key] = True
+
+         try:
+             # Import huggingface_hub
+             try:
+                 # Additional logging safeguard before the import
+                 import sys
+                 if sys.stderr is None:
+                     # If sys.stderr is None, install a dummy file object
+                     import io
+                     sys.stderr = io.StringIO()
+
+                 from huggingface_hub import hf_hub_download
+                 from huggingface_hub.utils import tqdm as hf_tqdm
+             except ImportError:
+                 error_msg = "huggingface_hub is not installed. Please run: pip install huggingface-hub"
+                 if progress_callback:
+                     progress = DownloadProgress(
+                         model_name=model_info["name"],
+                         downloaded=0,
+                         total=0,
+                         status="error",
+                         error_message=error_msg
+                     )
+                     progress_callback(progress)
+                 return
+
+             # Notify that the download is starting
+             if progress_callback:
+                 progress = DownloadProgress(
+                     model_name=model_info["name"],
+                     downloaded=0,
+                     total=0,
+                     status="downloading"
+                 )
+                 progress_callback(progress)
+
+             # Variables for progress tracking
+             import time
+             last_update_time = time.time()
+             last_downloaded = 0
+
+             # Custom progress callback (not used by the URL-based path below)
+             def custom_progress_callback(progress_dict):
+                 nonlocal last_update_time, last_downloaded
+
+                 if progress_callback and progress_dict:
+                     current_time = time.time()
+                     time_diff = current_time - last_update_time
+
+                     # Update at most every 0.5 s (avoids overly frequent updates)
+                     if time_diff >= 0.5:
+                         downloaded = progress_dict.get("downloaded", 0)
+                         total = progress_dict.get("total", 0)
+
+                         # Compute the speed
+                         if time_diff > 0:
+                             bytes_diff = downloaded - last_downloaded
+                             speed = bytes_diff / time_diff
+                         else:
+                             speed = 0
+
+                         # Compute the remaining time
+                         if speed > 0 and total > downloaded:
+                             eta = int((total - downloaded) / speed)
+                         else:
+                             eta = 0
+
+                         progress = DownloadProgress(
+                             model_name=model_info["name"],
+                             downloaded=downloaded,
+                             total=total,
+                             status="downloading",
+                             speed=speed,
+                             eta=eta
+                         )
+                         progress_callback(progress)
+
+                         last_update_time = current_time
+                         last_downloaded = downloaded
+
+             # Perform the download
+             try:
+                 # First check whether the file already exists
+                 target_path = self.models_dir / model_info["filename"]
+                 if target_path.exists():
+                     # Already present: report completion
+                     if progress_callback:
+                         progress = DownloadProgress(
+                             model_name=model_info["name"],
+                             downloaded=100,
+                             total=100,
+                             status="completed"
+                         )
+                         progress_callback(progress)
+                     return
+
+                 # Temporarily capture anything the libraries write to stderr
+                 import io
+                 import sys
+                 old_stderr = sys.stderr
+                 sys.stderr = io.StringIO()
+
+                 try:
+                     # URL-based download
+                     import urllib.request
+                     import urllib.error
+                     import tempfile
+                     import shutil
+
+                     # Build the Hugging Face download URL
+                     base_url = f"https://huggingface.co/{model_info['repo_id']}/resolve/main/{model_info['filename']}"
+
+                     # Download into a temporary file
+                     with tempfile.NamedTemporaryFile(delete=False, suffix=".gguf") as temp_file:
+                         temp_path = temp_file.name
+
+                     def download_with_progress(url, dest_path):
+                         """Download with progress reporting"""
+                         nonlocal last_update_time, last_downloaded
+
+                         response = urllib.request.urlopen(url)
+                         total_size = int(response.headers.get('Content-Length', 0))
+
+                         # Send the initial progress
+                         if progress_callback and total_size > 0:
+                             progress = DownloadProgress(
+                                 model_name=model_info["name"],
+                                 downloaded=0,
+                                 total=total_size,
+                                 status="downloading"
+                             )
+                             progress_callback(progress)
+
+                         block_size = 8192  # 8 KB
+                         downloaded = 0
+                         last_update_time = time.time()
+                         last_downloaded = 0
+
+                         with open(dest_path, 'wb') as f:
+                             while True:
+                                 buffer = response.read(block_size)
+                                 if not buffer:
+                                     break
+
+                                 f.write(buffer)
+                                 downloaded += len(buffer)
+
+                                 # Compute and send the progress
+                                 current_time = time.time()
+                                 time_diff = current_time - last_update_time
+
+                                 if time_diff >= 0.5 and progress_callback:
+                                     speed = (downloaded - last_downloaded) / time_diff if time_diff > 0 else 0
+                                     eta = int((total_size - downloaded) / speed) if speed > 0 else 0
+
+                                     progress = DownloadProgress(
+                                         model_name=model_info["name"],
+                                         downloaded=downloaded,
+                                         total=total_size,
+                                         status="downloading",
+                                         speed=speed,
+                                         eta=eta
+                                     )
+                                     progress_callback(progress)
+
+                                     last_update_time = current_time
+                                     last_downloaded = downloaded
+
+                     try:
+                         # Run the download
+                         download_with_progress(base_url, temp_path)
+
+                         # On success, move the file to its final location
+                         shutil.move(temp_path, str(target_path))
+
+                     except urllib.error.HTTPError:
+                         # Fall back to the conventional method when the direct URL is unavailable
+                         if Path(temp_path).exists():
+                             Path(temp_path).unlink()
+
+                         # Use hf_hub_download
+                         downloaded_path = hf_hub_download(
+                             repo_id=model_info["repo_id"],
+                             filename=model_info["filename"],
+                             local_dir=str(self.models_dir),
+                             force_download=False,
+                             resume_download=True,
+                             local_dir_use_symlinks=False
+                         )
+
+                 finally:
+                     # Restore stderr
+                     sys.stderr = old_stderr
+
+             except AttributeError as e:
+                 if "'NoneType' object has no attribute 'write'" in str(e):
+                     raise Exception("Logging error in huggingface_hub. This is a known issue in the Thonny environment. Please try downloading the model manually.")
+                 else:
+                     raise
+
+             # Download finished
+
+             # Notify completion
+             if progress_callback:
+                 progress = DownloadProgress(
+                     model_name=model_info["name"],
+                     downloaded=100,
+                     total=100,
+                     status="completed"
+                 )
+                 progress_callback(progress)
+
+         except Exception as e:
+             # An error occurred
+             import traceback
+             error_detail = f"{str(e)}\n\nDetails:\n{traceback.format_exc()}"
+             if progress_callback:
+                 progress = DownloadProgress(
+                     model_name=model_info["name"],
+                     downloaded=0,
+                     total=0,
+                     status="error",
+                     error_message=error_detail
+                 )
+                 progress_callback(progress)
+         finally:
+             self._downloading.pop(model_key, None)
+
+     def cancel_download(self, model_key: str):
+         """Cancel a download (not implemented yet)"""
+         # TODO: implement download cancellation
+         pass
+
+     def delete_model(self, model_path: str) -> bool:
+         """
+         Delete a model.
+
+         Args:
+             model_path: Path to the model file
+
+         Returns:
+             True if the deletion succeeded
+         """
+         try:
+             path = Path(model_path)
+             if path.exists() and path.parent == self.models_dir:
+                 path.unlink()
+                 # Model deleted
+                 return True
+             else:
+                 # File not found, or the path is outside the models directory
+                 return False
+         except Exception:
+             # Deletion failed
+             return False
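A sketch of how the download API above might be driven from a plain script (the callback wiring is illustrative; a real UI would marshal these updates onto the Tk main thread):

    import time
    from thonnycontrib.thonny_codemate.model_manager import ModelManager, DownloadProgress

    manager = ModelManager()  # defaults to the project's models directory

    def on_progress(p: DownloadProgress):
        # DownloadProgress exposes pre-formatted values for display
        if p.status == "downloading":
            print(f"{p.model_name}: {p.percentage:.1f}% ({p.size_str}, {p.speed_str}, ETA {p.eta_str})")
        elif p.status == "completed":
            print(f"{p.model_name}: done -> {manager.get_model_path('llama3.2-1b')}")
        elif p.status == "error":
            print(f"{p.model_name}: failed: {p.error_message}")

    manager.download_model("llama3.2-1b", progress_callback=on_progress)

    # download_model returns immediately and the work happens on a daemon
    # thread, so a standalone script must keep the process alive.
    time.sleep(1)  # give the worker thread a moment to register itself
    while any(m["downloading"] for m in manager.list_available_models()):
        time.sleep(1)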