thonny_codemate-0.1.0-py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Files changed (27)
  1. thonny_codemate-0.1.0.dist-info/METADATA +307 -0
  2. thonny_codemate-0.1.0.dist-info/RECORD +27 -0
  3. thonny_codemate-0.1.0.dist-info/WHEEL +5 -0
  4. thonny_codemate-0.1.0.dist-info/licenses/LICENSE +21 -0
  5. thonny_codemate-0.1.0.dist-info/top_level.txt +1 -0
  6. thonnycontrib/__init__.py +1 -0
  7. thonnycontrib/thonny_codemate/__init__.py +397 -0
  8. thonnycontrib/thonny_codemate/api.py +154 -0
  9. thonnycontrib/thonny_codemate/context_manager.py +296 -0
  10. thonnycontrib/thonny_codemate/external_providers.py +714 -0
  11. thonnycontrib/thonny_codemate/i18n.py +506 -0
  12. thonnycontrib/thonny_codemate/llm_client.py +841 -0
  13. thonnycontrib/thonny_codemate/message_virtualization.py +136 -0
  14. thonnycontrib/thonny_codemate/model_manager.py +515 -0
  15. thonnycontrib/thonny_codemate/performance_monitor.py +141 -0
  16. thonnycontrib/thonny_codemate/prompts.py +102 -0
  17. thonnycontrib/thonny_codemate/ui/__init__.py +1 -0
  18. thonnycontrib/thonny_codemate/ui/chat_view.py +687 -0
  19. thonnycontrib/thonny_codemate/ui/chat_view_html.py +1299 -0
  20. thonnycontrib/thonny_codemate/ui/custom_prompt_dialog.py +175 -0
  21. thonnycontrib/thonny_codemate/ui/markdown_renderer.py +484 -0
  22. thonnycontrib/thonny_codemate/ui/model_download_dialog.py +355 -0
  23. thonnycontrib/thonny_codemate/ui/settings_dialog.py +1218 -0
  24. thonnycontrib/thonny_codemate/utils/__init__.py +25 -0
  25. thonnycontrib/thonny_codemate/utils/constants.py +138 -0
  26. thonnycontrib/thonny_codemate/utils/error_messages.py +92 -0
  27. thonnycontrib/thonny_codemate/utils/unified_error_handler.py +310 -0
@@ -0,0 +1,714 @@
+ """
+ Support for external LLM providers.
+ Supports ChatGPT, the Ollama API, and OpenRouter.
+ """
+ import os
+ import json
+ import logging
+ import time
+ from typing import Optional, Iterator, Dict, Any
+ from abc import ABC, abstractmethod
+ import urllib.request
+ import urllib.error
+ import ssl
+ import sys
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+
+ try:
+     from ..utils.retry import retry_network_operation
+ except ImportError:
+     # Fallback: no retry
+     def retry_network_operation(func):
+         return func
+
+ logger = logging.getLogger(__name__)
+
+
+ def retry_on_network_error(max_attempts=3, delay=1.0, backoff=2.0):
+     """Decorator that retries on network errors."""
+     def decorator(func):
+         def wrapper(*args, **kwargs):
+             last_error = None
+             current_delay = delay
+
+             for attempt in range(max_attempts):
+                 try:
+                     return func(*args, **kwargs)
+                 except (urllib.error.URLError, ConnectionError, TimeoutError) as e:
+                     last_error = e
+                     if attempt < max_attempts - 1:
+                         logger.info(f"Network error, retrying in {current_delay}s: {e}")
+                         time.sleep(current_delay)
+                         current_delay *= backoff
+                     else:
+                         raise
+
+             if last_error:
+                 raise last_error
+         return wrapper
+     return decorator
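A quick usage sketch (editor's note, not part of the package): the decorator takes its arguments first, so it is applied with parentheses, and any wrapped callable that raises URLError, ConnectionError, or TimeoutError is retried with exponential backoff. The fetch_status helper below is hypothetical.

    @retry_on_network_error(max_attempts=3, delay=1.0, backoff=2.0)
    def fetch_status(url):
        # URLError here triggers a retry: after 1.0 s, then 2.0 s (delay * backoff)
        with urllib.request.urlopen(url, timeout=5) as response:
            return response.status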
+
+
+ class ExternalProvider(ABC):
+     """Base class for external providers."""
+
+     @abstractmethod
+     def generate(self, prompt: str, **kwargs) -> str:
+         """Generate text."""
+         pass
+
+     @abstractmethod
+     def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
+         """Generate text as a stream."""
+         pass
+
+     @abstractmethod
+     def test_connection(self) -> Dict[str, Any]:
+         """Test the connection."""
+         pass
+
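For orientation, a concrete provider must implement all three abstract methods before it can be instantiated; a minimal toy subclass (editor's illustration, not in the package) would look like:

    class EchoProvider(ExternalProvider):
        """Toy provider that echoes the prompt back."""

        def generate(self, prompt: str, **kwargs) -> str:
            return prompt

        def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
            yield prompt

        def test_connection(self) -> Dict[str, Any]:
            return {"success": True, "provider": "Echo"}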
+
+ class ChatGPTProvider(ExternalProvider):
+     """OpenAI ChatGPT API provider."""
+
+     def __init__(self, api_key: str, model: str = "gpt-3.5-turbo", base_url: Optional[str] = None):
+         self.api_key = api_key
+         self.model = model
+         self.base_url = base_url or "https://api.openai.com/v1"
+         self.headers = {
+             "Authorization": f"Bearer {api_key}",
+             "Content-Type": "application/json"
+         }
+
+     @retry_on_network_error()
+     def generate(self, prompt: str, **kwargs) -> str:
+         """Generate text using the ChatGPT API."""
+         messages = kwargs.get("messages", [{"role": "user", "content": prompt}])
+
+         data = {
+             "model": self.model,
+             "messages": messages,
+             "temperature": kwargs.get("temperature", 0.7),
+             "max_tokens": kwargs.get("max_tokens", 2048),
+             "stream": False
+         }
+
+         try:
+             req = urllib.request.Request(
+                 f"{self.base_url}/chat/completions",
+                 data=json.dumps(data).encode('utf-8'),
+                 headers=self.headers
+             )
+
+             with urllib.request.urlopen(req) as response:
+                 result = json.loads(response.read().decode('utf-8'))
+                 return result['choices'][0]['message']['content']
+
+         except urllib.error.HTTPError as e:
+             error_body = e.read().decode('utf-8')
+             logger.error(f"ChatGPT API error: {e.code} - {error_body}")
+
+             # More specific error messages
+             if e.code == 401:
+                 raise Exception("Invalid API key. Please check your ChatGPT API key.")
+             elif e.code == 429:
+                 raise Exception("Rate limit exceeded. Please try again later.")
+             elif e.code == 500:
+                 raise Exception("ChatGPT server error. Please try again later.")
+             else:
+                 raise Exception(f"ChatGPT API error ({e.code}): {error_body}")
+         except Exception as e:
+             logger.error(f"ChatGPT request failed: {e}")
+             raise
+
+     def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
+         """Generate text as a stream using the ChatGPT API."""
+         messages = kwargs.get("messages", [{"role": "user", "content": prompt}])
+
+         data = {
+             "model": self.model,
+             "messages": messages,
+             "temperature": kwargs.get("temperature", 0.7),
+             "max_tokens": kwargs.get("max_tokens", 2048),
+             "stream": True
+         }
+
+         try:
+             req = urllib.request.Request(
+                 f"{self.base_url}/chat/completions",
+                 data=json.dumps(data).encode('utf-8'),
+                 headers=self.headers
+             )
+
+             with urllib.request.urlopen(req) as response:
+                 for line in response:
+                     line = line.decode('utf-8').strip()
+                     if line.startswith("data: "):
+                         data_str = line[6:]
+                         if data_str == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(data_str)
+                             if 'choices' in data and len(data['choices']) > 0:
+                                 delta = data['choices'][0].get('delta', {})
+                                 if 'content' in delta:
+                                     yield delta['content']
+                         except json.JSONDecodeError:
+                             continue
+
+         except urllib.error.HTTPError as e:
+             import traceback
+             logger.error(f"ChatGPT streaming HTTP error: {e}\n{traceback.format_exc()}")
+             if e.code == 401:
+                 yield "[Error: Invalid API key]"
+             else:
+                 yield f"[Error: HTTP {e.code}]"
+             return
+         except Exception as e:
+             import traceback
+             logger.error(f"ChatGPT streaming failed: {e}\n{traceback.format_exc()}")
+             yield f"[Error: {str(e)}]"
+             return
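The streaming loop above parses OpenAI-style server-sent events: each payload line begins with "data: " and carries a JSON chunk whose choices[0].delta may contain a content fragment. A worked example of the per-line handling (editor's illustration):

    line = 'data: {"choices": [{"delta": {"content": "Hel"}}]}'
    chunk = json.loads(line[6:])  # strip the "data: " prefix
    print(chunk['choices'][0]['delta'].get('content'))  # -> Hel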
+
+     def get_model_info(self, model_name: Optional[str] = None) -> Dict[str, Any]:
+         """Get detailed model information (including context size)."""
+         model = model_name or self.model
+
+         # Context sizes for OpenAI/ChatGPT models are resolved from known values
+         openai_models = {
+             "gpt-4o": 128000,
+             "gpt-4o-mini": 128000,
+             "gpt-4-turbo": 128000,
+             "gpt-4-turbo-preview": 128000,
+             "gpt-4-0125-preview": 128000,
+             "gpt-4-1106-preview": 128000,
+             "gpt-4": 8192,
+             "gpt-4-0613": 8192,
+             "gpt-4-32k": 32768,
+             "gpt-4-32k-0613": 32768,
+             "gpt-3.5-turbo": 16385,
+             "gpt-3.5-turbo-0125": 16385,
+             "gpt-3.5-turbo-1106": 16385,
+             "gpt-3.5-turbo-16k": 16385,
+             "text-davinci-003": 4097,
+             "text-davinci-002": 4097,
+         }
+
+         # Exact match
+         if model in openai_models:
+             return {"context_size": openai_models[model]}
+
+         # Partial match (when the model name includes a version suffix)
+         for known_model, size in openai_models.items():
+             if known_model in model:
+                 return {"context_size": size}
+
+         # Unknown model
+         return {"context_size": None, "error": f"Unknown model: {model}"}
+
+     @retry_network_operation
+     def test_connection(self) -> Dict[str, Any]:
+         """Test the connection (with retry)."""
+         try:
+             response = self.generate("Say 'Hello!' in exactly one word.", max_tokens=10)
+             return {
+                 "success": True,
+                 "provider": "ChatGPT",
+                 "model": self.model,
+                 "response": response
+             }
+         except Exception as e:
+             return {
+                 "success": False,
+                 "provider": "ChatGPT",
+                 "model": self.model,
+                 "error": str(e)
+             }
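Putting the class together, a typical call sequence might look like this (editor's sketch; the environment variable and model name are placeholders):

    provider = ChatGPTProvider(api_key=os.environ.get("OPENAI_API_KEY", ""), model="gpt-4o-mini")
    if provider.test_connection()["success"]:
        for token in provider.generate_stream("Explain Python decorators briefly."):
            print(token, end="", flush=True)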
+
+
+ class OllamaProvider(ExternalProvider):
+     """Ollama/LM Studio API provider."""
+
+     def __init__(self, base_url: str = "http://localhost:11434", model: str = "llama3"):
+         self.base_url = base_url.rstrip('/')
+         self.model = model
+         self.headers = {"Content-Type": "application/json"}
+
+         # Determine whether this is LM Studio (port number as a first hint, verified later)
+         self._port_suggests_lmstudio = ":1234" in base_url
+         self.is_lmstudio = None  # actual detection is deferred
+
+     def _detect_server_type(self):
+         """Detect the server type (Ollama or LM Studio)."""
+         if self.is_lmstudio is not None:
+             return self.is_lmstudio
+
+         # Use the initial guess from the port number
+         if self._port_suggests_lmstudio:
+             # If LM Studio is likely, check the OpenAI-compatible API
+             try:
+                 req = urllib.request.Request(f"{self.base_url}/v1/models")
+                 with urllib.request.urlopen(req, timeout=1) as response:
+                     data = json.loads(response.read().decode('utf-8'))
+                     if 'data' in data:  # OpenAI-compatible response
+                         self.is_lmstudio = True
+                         logger.debug("Detected LM Studio server")
+                         return True
+             except Exception:
+                 pass
+
+         # Check the Ollama API
+         try:
+             req = urllib.request.Request(f"{self.base_url}/api/tags")
+             with urllib.request.urlopen(req, timeout=1) as response:
+                 data = json.loads(response.read().decode('utf-8'))
+                 if 'models' in data:  # Ollama response
+                     self.is_lmstudio = False
+                     logger.debug("Detected Ollama server")
+                     return False
+         except Exception:
+             pass
+
+         # Fall back to the guess from the port number
+         self.is_lmstudio = self._port_suggests_lmstudio
+         return self.is_lmstudio
+
+     def _build_prompt_from_messages(self, messages: list) -> str:
+         """Build an Ollama prompt from a list of messages."""
+         prompt_parts = []
+
+         for msg in messages:
+             role = msg.get("role", "user")
+             content = msg.get("content", "")
+
+             if role == "system":
+                 prompt_parts.append(f"System: {content}")
+             elif role == "user":
+                 prompt_parts.append(f"\nUser: {content}")
+             elif role == "assistant":
+                 prompt_parts.append(f"\nAssistant: {content}")
+
+         # Finally, prompt the assistant to respond
+         prompt_parts.append("\nAssistant: ")
+
+         return "\n".join(prompt_parts)
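Note that the user/assistant parts each begin with "\n" and the parts are then joined with "\n", so rendered turns end up separated by blank lines. A worked example (editor's illustration):

    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hi"},
    ]
    # _build_prompt_from_messages(messages) returns:
    # 'System: You are a helpful assistant.\n\nUser: Hi\n\nAssistant: '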
+
+     def generate(self, prompt: str, **kwargs) -> str:
+         """Generate text using the Ollama/LM Studio API."""
+         if self._detect_server_type():
+             # LM Studio exposes an OpenAI-compatible API
+             messages = kwargs.get("messages", [])
+             if not messages:
+                 messages = [{"role": "user", "content": prompt}]
+
+             data = {
+                 "model": self.model,
+                 "messages": messages,
+                 "temperature": kwargs.get("temperature", 0.7),
+                 "max_tokens": kwargs.get("max_tokens", 2048),
+                 "stream": False
+             }
+
+             try:
+                 req = urllib.request.Request(
+                     f"{self.base_url}/v1/chat/completions",
+                     data=json.dumps(data).encode('utf-8'),
+                     headers=self.headers
+                 )
+
+                 with urllib.request.urlopen(req) as response:
+                     result = json.loads(response.read().decode('utf-8'))
+                     return result['choices'][0]['message']['content']
+
+             except Exception as e:
+                 logger.error(f"LM Studio request failed: {e}")
+                 raise
+         else:
+             # Ollama API
+             # If a messages parameter is present, include the conversation history
+             messages = kwargs.get("messages", [])
+             if messages:
+                 # Build a full prompt that includes system and user messages
+                 full_prompt = self._build_prompt_from_messages(messages)
+             else:
+                 full_prompt = prompt
+
+             data = {
+                 "model": self.model,
+                 "prompt": full_prompt,
+                 "stream": False,
+                 "options": {
+                     "temperature": kwargs.get("temperature", 0.7),
+                     "num_predict": kwargs.get("max_tokens", 2048),
+                 }
+             }
+
+             try:
+                 req = urllib.request.Request(
+                     f"{self.base_url}/api/generate",
+                     data=json.dumps(data).encode('utf-8'),
+                     headers=self.headers
+                 )
+
+                 with urllib.request.urlopen(req) as response:
+                     result = json.loads(response.read().decode('utf-8'))
+                     return result['response']
+
+             except Exception as e:
+                 logger.error(f"Ollama request failed: {e}")
+                 raise
+
+     def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
+         """Generate text as a stream using the Ollama/LM Studio API."""
+         if self._detect_server_type():
+             # LM Studio exposes an OpenAI-compatible API
+             messages = kwargs.get("messages", [])
+             if not messages:
+                 messages = [{"role": "user", "content": prompt}]
+
+             data = {
+                 "model": self.model,
+                 "messages": messages,
+                 "temperature": kwargs.get("temperature", 0.7),
+                 "max_tokens": kwargs.get("max_tokens", 2048),
+                 "stream": True
+             }
+
+             try:
+                 req = urllib.request.Request(
+                     f"{self.base_url}/v1/chat/completions",
+                     data=json.dumps(data).encode('utf-8'),
+                     headers=self.headers
+                 )
+
+                 with urllib.request.urlopen(req) as response:
+                     for line in response:
+                         line = line.decode('utf-8').strip()
+                         if line.startswith("data: "):
+                             data_str = line[6:]
+                             if data_str == "[DONE]":
+                                 break
+                             try:
+                                 data = json.loads(data_str)
+                                 if 'choices' in data and len(data['choices']) > 0:
+                                     delta = data['choices'][0].get('delta', {})
+                                     if 'content' in delta:
+                                         yield delta['content']
+                             except json.JSONDecodeError:
+                                 continue
+
+             except Exception as e:
+                 logger.error(f"LM Studio streaming failed: {e}")
+                 raise
+         else:
+             # Ollama API
+             # If a messages parameter is present, include the conversation history
+             messages = kwargs.get("messages", [])
+             if messages:
+                 # Build a full prompt that includes system and user messages
+                 full_prompt = self._build_prompt_from_messages(messages)
+             else:
+                 full_prompt = prompt
+
+             data = {
+                 "model": self.model,
+                 "prompt": full_prompt,
+                 "stream": True,
+                 "options": {
+                     "temperature": kwargs.get("temperature", 0.7),
+                     "num_predict": kwargs.get("max_tokens", 2048),
+                 }
+             }
+
+             try:
+                 req = urllib.request.Request(
+                     f"{self.base_url}/api/generate",
+                     data=json.dumps(data).encode('utf-8'),
+                     headers=self.headers
+                 )
+
+                 with urllib.request.urlopen(req) as response:
+                     for line in response:
+                         try:
+                             data = json.loads(line.decode('utf-8'))
+                             if 'response' in data:
+                                 yield data['response']
+                         except json.JSONDecodeError:
+                             continue
+
+             except Exception as e:
+                 logger.error(f"Ollama streaming failed: {e}")
+                 raise
+
+     @retry_network_operation
+     def get_models(self) -> list[str]:
+         """Get the list of available models (with retry)."""
+         try:
+             if self._detect_server_type():
+                 # LM Studio exposes an OpenAI-compatible API
+                 req = urllib.request.Request(f"{self.base_url}/v1/models")
+                 with urllib.request.urlopen(req, timeout=5) as response:
+                     data = json.loads(response.read().decode('utf-8'))
+                     models = [m['id'] for m in data.get('data', [])]
+                     return models
+             else:
+                 # Ollama API
+                 req = urllib.request.Request(f"{self.base_url}/api/tags")
+                 with urllib.request.urlopen(req, timeout=5) as response:
+                     data = json.loads(response.read().decode('utf-8'))
+                     models = [m['name'] for m in data.get('models', [])]
+                     return models
+         except Exception as e:
+             logger.error(f"Failed to fetch models: {e}")
+             return []
+
+     def get_model_info(self, model_name: Optional[str] = None) -> Dict[str, Any]:
+         """Get detailed model information (including context size)."""
+         model = model_name or self.model
+
+         try:
+             if self._detect_server_type():
+                 # LM Studio: use the /api/v0/models endpoint
+                 req = urllib.request.Request(f"{self.base_url}/api/v0/models")
+                 with urllib.request.urlopen(req, timeout=10) as response:
+                     result = json.loads(response.read().decode('utf-8'))
+
+                     # Look up the requested model
+                     for model_data in result:
+                         if model_data.get('id') == model or model_data.get('name') == model:
+                             max_context_length = model_data.get('max_context_length')
+                             return {
+                                 "context_size": max_context_length,
+                                 "model_data": model_data
+                             }
+
+                     # Model not found
+                     available_models = [m.get('id', m.get('name', 'unknown')) for m in result]
+                     return {
+                         "context_size": None,
+                         "error": f"Model '{model}' not found in LM Studio models",
+                         "available_models": available_models
+                     }
+             else:
+                 # Ollama: use the /api/show endpoint
+                 data = {"name": model}
+                 req = urllib.request.Request(
+                     f"{self.base_url}/api/show",
+                     data=json.dumps(data).encode('utf-8'),
+                     headers=self.headers
+                 )
+
+                 with urllib.request.urlopen(req, timeout=10) as response:
+                     result = json.loads(response.read().decode('utf-8'))
+
+                     # Get the context size from the model info
+                     model_info = result.get('model_info', {})
+                     parameters = result.get('parameters', '')
+                     template = result.get('template', '')
+
+                     # Look for the context size in several places
+                     context_size = None
+
+                     # 1. Context size inside model_info
+                     if isinstance(model_info, dict):
+                         for key in ['context_length', 'max_position_embeddings', 'n_ctx']:
+                             if key in model_info:
+                                 context_size = model_info[key]
+                                 break
+
+                     # 2. Look for num_ctx in parameters
+                     if context_size is None and parameters:
+                         # parameters is a string such as "num_ctx 4096"
+                         import re
+                         match = re.search(r'num_ctx\s+(\d+)', parameters)
+                         if match:
+                             context_size = int(match.group(1))
+
+                     # 3. Look for context-size hints in the template
+                     if context_size is None and template:
+                         # Some models carry a hint in their template
+                         if '128k' in template.lower() or '128000' in template:
+                             context_size = 128000
+                         elif '32k' in template.lower() or '32768' in template:
+                             context_size = 32768
+                         elif '8k' in template.lower() or '8192' in template:
+                             context_size = 8192
+
+                     return {
+                         "context_size": context_size,
+                         "model_info": model_info,
+                         "parameters": parameters,
+                         "template": template
+                     }
+         except Exception as e:
+             logger.error(f"Failed to get model info for {model}: {e}")
+             return {"context_size": None, "error": str(e)}
+
+     def test_connection(self) -> Dict[str, Any]:
+         """Test the connection."""
+         try:
+             # Test by fetching the model list
+             models = self.get_models()
+
+             if models:
+                 return {
+                     "success": True,
+                     "provider": "Ollama",
+                     "base_url": self.base_url,
+                     "available_models": models,
+                     "current_model": self.model
+                 }
+             else:
+                 return {
+                     "success": False,
+                     "provider": "Ollama",
+                     "base_url": self.base_url,
+                     "error": "No models found or connection failed"
+                 }
+         except Exception as e:
+             return {
+                 "success": False,
+                 "provider": "Ollama",
+                 "base_url": self.base_url,
+                 "error": str(e)
+             }
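Local usage might look like the following (editor's sketch; assumes a default Ollama install on port 11434 with at least one model pulled):

    ollama = OllamaProvider(base_url="http://localhost:11434", model="llama3")
    print(ollama.get_models())        # e.g. ['llama3:latest', ...]
    info = ollama.get_model_info()
    print(info.get("context_size"))   # None if the size could not be determined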
+
+
+ class OpenRouterProvider(ExternalProvider):
+     """OpenRouter API provider."""
+
+     def __init__(self, api_key: str, model: str = "meta-llama/llama-3.2-3b-instruct:free"):
+         self.api_key = api_key
+         self.model = model
+         self.base_url = "https://openrouter.ai/api/v1"
+         self.headers = {
+             "Authorization": f"Bearer {api_key}",
+             "Content-Type": "application/json",
+             "HTTP-Referer": "https://github.com/thonny/thonny",
+             "X-Title": "Thonny Local LLM Plugin"
+         }
+
+     def generate(self, prompt: str, **kwargs) -> str:
+         """Generate text using the OpenRouter API."""
+         messages = kwargs.get("messages", [{"role": "user", "content": prompt}])
+
+         data = {
+             "model": self.model,
+             "messages": messages,
+             "temperature": kwargs.get("temperature", 0.7),
+             "max_tokens": kwargs.get("max_tokens", 2048),
+             "stream": False
+         }
+
+         try:
+             req = urllib.request.Request(
+                 f"{self.base_url}/chat/completions",
+                 data=json.dumps(data).encode('utf-8'),
+                 headers=self.headers
+             )
+
+             # Enable SSL certificate verification
+             context = ssl.create_default_context()
+
+             with urllib.request.urlopen(req, context=context) as response:
+                 result = json.loads(response.read().decode('utf-8'))
+                 return result['choices'][0]['message']['content']
+
+         except Exception as e:
+             logger.error(f"OpenRouter request failed: {e}")
+             raise
+
+     def generate_stream(self, prompt: str, **kwargs) -> Iterator[str]:
+         """Generate text as a stream using the OpenRouter API."""
+         messages = kwargs.get("messages", [{"role": "user", "content": prompt}])
+
+         data = {
+             "model": self.model,
+             "messages": messages,
+             "temperature": kwargs.get("temperature", 0.7),
+             "max_tokens": kwargs.get("max_tokens", 2048),
+             "stream": True
+         }
+
+         try:
+             req = urllib.request.Request(
+                 f"{self.base_url}/chat/completions",
+                 data=json.dumps(data).encode('utf-8'),
+                 headers=self.headers
+             )
+
+             context = ssl.create_default_context()
+
+             with urllib.request.urlopen(req, context=context) as response:
+                 for line in response:
+                     line = line.decode('utf-8').strip()
+                     if line.startswith("data: "):
+                         data_str = line[6:]
+                         if data_str == "[DONE]":
+                             break
+                         try:
+                             data = json.loads(data_str)
+                             if 'choices' in data and len(data['choices']) > 0:
+                                 delta = data['choices'][0].get('delta', {})
+                                 if 'content' in delta:
+                                     yield delta['content']
+                         except json.JSONDecodeError:
+                             continue
+
+         except Exception as e:
+             logger.error(f"OpenRouter streaming failed: {e}")
+             raise
+
+     def get_model_info(self, model_name: Optional[str] = None) -> Dict[str, Any]:
+         """Get detailed model information (including context size)."""
+         model = model_name or self.model
+
+         try:
+             # Use the OpenRouter API /v1/models endpoint
+             req = urllib.request.Request(
+                 f"{self.base_url}/models",
+                 headers=self.headers
+             )
+
+             context = ssl.create_default_context()
+
+             with urllib.request.urlopen(req, context=context, timeout=10) as response:
+                 result = json.loads(response.read().decode('utf-8'))
+
+                 # Look up the requested model
+                 for model_data in result.get('data', []):
+                     if model_data.get('id') == model:
+                         context_length = model_data.get('context_length')
+                         return {
+                             "context_size": context_length,
+                             "model_data": model_data
+                         }
+
+                 # Model not found
+                 return {
+                     "context_size": None,
+                     "error": f"Model '{model}' not found in available models"
+                 }
+
+         except Exception as e:
+             logger.error(f"Failed to get model info for {model}: {e}")
+             return {"context_size": None, "error": str(e)}
+
+     def test_connection(self) -> Dict[str, Any]:
+         """Test the connection."""
+         try:
+             response = self.generate("Say 'Hello!' in exactly one word.", max_tokens=10)
+             return {
+                 "success": True,
+                 "provider": "OpenRouter",
+                 "model": self.model,
+                 "response": response
+             }
+         except Exception as e:
+             return {
+                 "success": False,
+                 "provider": "OpenRouter",
+                 "model": self.model,
+                 "error": str(e)
+             }
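Because all three classes implement the ExternalProvider interface, callers can treat them interchangeably. A closing provider-agnostic sketch (editor's illustration, not from the package; the API keys are placeholders):

    def check_all(providers):
        # Run test_connection() on each provider and report the outcome
        for p in providers:
            result = p.test_connection()
            status = "ok" if result["success"] else f"failed: {result.get('error')}"
            print(f"{result['provider']}: {status}")

    check_all([
        ChatGPTProvider(api_key="sk-...", model="gpt-4o-mini"),
        OllamaProvider(),
        OpenRouterProvider(api_key="sk-or-..."),
    ])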