isa-model 0.3.5__py3-none-any.whl → 0.3.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. isa_model/__init__.py +30 -1
  2. isa_model/client.py +770 -0
  3. isa_model/core/config/__init__.py +16 -0
  4. isa_model/core/config/config_manager.py +514 -0
  5. isa_model/core/config.py +426 -0
  6. isa_model/core/models/model_billing_tracker.py +476 -0
  7. isa_model/core/models/model_manager.py +399 -0
  8. isa_model/core/{storage/supabase_storage.py → models/model_repo.py} +72 -73
  9. isa_model/core/pricing_manager.py +426 -0
  10. isa_model/core/services/__init__.py +19 -0
  11. isa_model/core/services/intelligent_model_selector.py +547 -0
  12. isa_model/core/types.py +291 -0
  13. isa_model/deployment/__init__.py +2 -0
  14. isa_model/deployment/cloud/modal/isa_vision_doc_service.py +157 -3
  15. isa_model/deployment/cloud/modal/isa_vision_table_service.py +532 -0
  16. isa_model/deployment/cloud/modal/isa_vision_ui_service.py +104 -3
  17. isa_model/deployment/cloud/modal/register_models.py +321 -0
  18. isa_model/deployment/runtime/deployed_service.py +338 -0
  19. isa_model/deployment/services/__init__.py +9 -0
  20. isa_model/deployment/services/auto_deploy_vision_service.py +537 -0
  21. isa_model/deployment/services/model_service.py +332 -0
  22. isa_model/deployment/services/service_monitor.py +356 -0
  23. isa_model/deployment/services/service_registry.py +527 -0
  24. isa_model/eval/__init__.py +80 -44
  25. isa_model/eval/config/__init__.py +10 -0
  26. isa_model/eval/config/evaluation_config.py +108 -0
  27. isa_model/eval/evaluators/__init__.py +18 -0
  28. isa_model/eval/evaluators/base_evaluator.py +503 -0
  29. isa_model/eval/evaluators/llm_evaluator.py +472 -0
  30. isa_model/eval/factory.py +417 -709
  31. isa_model/eval/infrastructure/__init__.py +24 -0
  32. isa_model/eval/infrastructure/experiment_tracker.py +466 -0
  33. isa_model/eval/metrics.py +191 -21
  34. isa_model/inference/ai_factory.py +181 -605
  35. isa_model/inference/services/audio/base_stt_service.py +65 -1
  36. isa_model/inference/services/audio/base_tts_service.py +75 -1
  37. isa_model/inference/services/audio/openai_stt_service.py +189 -151
  38. isa_model/inference/services/audio/openai_tts_service.py +12 -10
  39. isa_model/inference/services/audio/replicate_tts_service.py +61 -56
  40. isa_model/inference/services/base_service.py +55 -17
  41. isa_model/inference/services/embedding/base_embed_service.py +65 -1
  42. isa_model/inference/services/embedding/ollama_embed_service.py +103 -43
  43. isa_model/inference/services/embedding/openai_embed_service.py +8 -10
  44. isa_model/inference/services/helpers/stacked_config.py +148 -0
  45. isa_model/inference/services/img/__init__.py +18 -0
  46. isa_model/inference/services/{vision → img}/base_image_gen_service.py +80 -1
  47. isa_model/inference/services/{stacked → img}/flux_professional_service.py +25 -1
  48. isa_model/inference/services/{stacked → img/helpers}/base_stacked_service.py +40 -35
  49. isa_model/inference/services/{vision → img}/replicate_image_gen_service.py +44 -31
  50. isa_model/inference/services/llm/__init__.py +3 -3
  51. isa_model/inference/services/llm/base_llm_service.py +492 -40
  52. isa_model/inference/services/llm/helpers/llm_prompts.py +258 -0
  53. isa_model/inference/services/llm/helpers/llm_utils.py +280 -0
  54. isa_model/inference/services/llm/ollama_llm_service.py +51 -17
  55. isa_model/inference/services/llm/openai_llm_service.py +70 -19
  56. isa_model/inference/services/llm/yyds_llm_service.py +24 -23
  57. isa_model/inference/services/vision/__init__.py +38 -4
  58. isa_model/inference/services/vision/base_vision_service.py +218 -117
  59. isa_model/inference/services/vision/{isA_vision_service.py → disabled/isA_vision_service.py} +98 -0
  60. isa_model/inference/services/{stacked → vision}/doc_analysis_service.py +1 -1
  61. isa_model/inference/services/vision/helpers/base_stacked_service.py +274 -0
  62. isa_model/inference/services/vision/helpers/image_utils.py +272 -3
  63. isa_model/inference/services/vision/helpers/vision_prompts.py +297 -0
  64. isa_model/inference/services/vision/openai_vision_service.py +104 -307
  65. isa_model/inference/services/vision/replicate_vision_service.py +140 -325
  66. isa_model/inference/services/{stacked → vision}/ui_analysis_service.py +2 -498
  67. isa_model/scripts/register_models.py +370 -0
  68. isa_model/scripts/register_models_with_embeddings.py +510 -0
  69. isa_model/serving/api/fastapi_server.py +6 -1
  70. isa_model/serving/api/routes/unified.py +202 -0
  71. {isa_model-0.3.5.dist-info → isa_model-0.3.6.dist-info}/METADATA +4 -1
  72. {isa_model-0.3.5.dist-info → isa_model-0.3.6.dist-info}/RECORD +77 -53
  73. isa_model/config/__init__.py +0 -9
  74. isa_model/config/config_manager.py +0 -213
  75. isa_model/core/model_manager.py +0 -213
  76. isa_model/core/model_registry.py +0 -375
  77. isa_model/core/vision_models_init.py +0 -116
  78. isa_model/inference/billing_tracker.py +0 -406
  79. isa_model/inference/services/llm/triton_llm_service.py +0 -481
  80. isa_model/inference/services/stacked/__init__.py +0 -26
  81. isa_model/inference/services/stacked/config.py +0 -426
  82. isa_model/inference/services/vision/ollama_vision_service.py +0 -194
  83. /isa_model/core/{model_storage.py → models/model_storage.py} +0 -0
  84. /isa_model/inference/services/{vision → embedding}/helpers/text_splitter.py +0 -0
  85. /isa_model/inference/services/llm/{llm_adapter.py → helpers/llm_adapter.py} +0 -0
  86. {isa_model-0.3.5.dist-info → isa_model-0.3.6.dist-info}/WHEEL +0 -0
  87. {isa_model-0.3.5.dist-info → isa_model-0.3.6.dist-info}/top_level.txt +0 -0
@@ -1,21 +1,332 @@
 from abc import ABC, abstractmethod
 from typing import Dict, Any, List, Union, Optional, AsyncGenerator, Callable
+import logging
+
 from isa_model.inference.services.base_service import BaseService
-from isa_model.inference.services.llm.llm_adapter import AdapterManager
+from isa_model.inference.services.llm.helpers.llm_adapter import AdapterManager
+
+logger = logging.getLogger(__name__)

 class BaseLLMService(BaseService):
-    """Base class for Large Language Model services with unified invoke interface"""
+    """Base class for Large Language Model services with unified task dispatch"""

-    def __init__(self, provider, model_name: str):
-        super().__init__(provider, model_name)
-        self._bound_tools: List[Any] = []  # store the original tool objects
-        self._tool_mappings: Dict[str, tuple] = {}  # map tool name to (tool, adapter)
+    def __init__(self, provider_name: str, model_name: str, **kwargs):
+        super().__init__(provider_name, model_name, **kwargs)
+        self._bound_tools: List[Any] = []
+        self._tool_mappings: Dict[str, tuple] = {}

         # Initialize the adapter manager
         self.adapter_manager = AdapterManager()

-        # Get streaming config from provider config
-        self.streaming = self.config.get("streaming", False)
+        # Get config from provider
+        provider_config = self.get_provider_config()
+        self.streaming = provider_config.get("streaming", False)
+        self.max_tokens = provider_config.get("max_tokens", 4096)
+        self.temperature = provider_config.get("temperature", 0.7)
+
+    async def invoke(
+        self,
+        input_data: Union[str, List[Dict[str, str]], Any],
+        task: Optional[str] = None,
+        **kwargs
+    ) -> Dict[str, Any]:
+        """
+        Unified task dispatch method - generic implementation provided by the base class
+
+        Args:
+            input_data: Input data, which can be:
+                - str: a simple text prompt
+                - list: message history, e.g. [{"role": "user", "content": "hello"}]
+                - Any: a LangChain message object or other format
+            task: Task type; multiple LLM tasks are supported
+            **kwargs: Additional task-specific parameters
+
+        Returns:
+            Dict containing task results
+        """
+        task = task or "chat"
+
+        # ==================== Chat tasks ====================
+        if task == "chat":
+            return await self.chat(input_data, kwargs.get("max_tokens", self.max_tokens))
+        elif task == "complete":
+            return await self.complete_text(input_data, kwargs.get("max_tokens", self.max_tokens))
+        elif task == "instruct":
+            return await self.instruct(input_data, kwargs.get("instruction"), kwargs.get("max_tokens", self.max_tokens))
+
+        # ==================== Text generation tasks ====================
+        elif task == "generate":
+            return await self.generate_text(input_data, kwargs.get("max_tokens", self.max_tokens))
+        elif task == "rewrite":
+            return await self.rewrite_text(input_data, kwargs.get("style"), kwargs.get("tone"))
+        elif task == "summarize":
+            return await self.summarize_text(input_data, kwargs.get("max_length"), kwargs.get("style"))
+        elif task == "translate":
+            return await self.translate_text(input_data, kwargs.get("target_language"), kwargs.get("source_language"))
+
+        # ==================== Analysis tasks ====================
+        elif task == "analyze":
+            return await self.analyze_text(input_data, kwargs.get("analysis_type"))
+        elif task == "classify":
+            return await self.classify_text(input_data, kwargs.get("categories"))
+        elif task == "extract":
+            return await self.extract_information(input_data, kwargs.get("extract_type"))
+        elif task == "sentiment":
+            return await self.analyze_sentiment(input_data)
+
+        # ==================== Coding tasks ====================
+        elif task == "code":
+            return await self.generate_code(input_data, kwargs.get("language"), kwargs.get("style"))
+        elif task == "explain_code":
+            return await self.explain_code(input_data, kwargs.get("language"))
+        elif task == "debug_code":
+            return await self.debug_code(input_data, kwargs.get("language"))
+        elif task == "refactor_code":
+            return await self.refactor_code(input_data, kwargs.get("language"), kwargs.get("improvements"))
+
+        # ==================== Reasoning tasks ====================
+        elif task == "reason":
+            return await self.reason_about(input_data, kwargs.get("reasoning_type"))
+        elif task == "solve":
+            return await self.solve_problem(input_data, kwargs.get("problem_type"))
+        elif task == "plan":
+            return await self.create_plan(input_data, kwargs.get("plan_type"))
+
+        # ==================== Tool-calling tasks ====================
+        elif task == "tool_call":
+            return await self.call_tools(input_data, kwargs.get("available_tools"))
+        elif task == "function_call":
+            return await self.call_function(input_data, kwargs.get("function_name"), kwargs.get("parameters"))
+
+        else:
+            raise NotImplementedError(f"{self.__class__.__name__} does not support task: {task}")
+
+    # ==================== Chat methods ====================
+
+    async def chat(
+        self,
+        input_data: Union[str, List[Dict[str, str]], Any],
+        max_tokens: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """
+        Chat conversation - providers must implement this
+
+        Args:
+            input_data: Input messages
+            max_tokens: Maximum number of tokens to generate
+
+        Returns:
+            Dict containing chat response
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support chat task")
+
+    # ==================== Text generation methods ====================
+
+    async def complete_text(
+        self,
+        input_data: Union[str, Any],
+        max_tokens: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """
+        Text completion - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support complete_text task")
+
+    async def instruct(
+        self,
+        input_data: Union[str, Any],
+        instruction: Optional[str] = None,
+        max_tokens: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """
+        Instruction following - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support instruct task")
+
+    async def generate_text(
+        self,
+        input_data: Union[str, Any],
+        max_tokens: Optional[int] = None
+    ) -> Dict[str, Any]:
+        """
+        General text generation - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support generate_text task")
+
+    async def rewrite_text(
+        self,
+        input_data: Union[str, Any],
+        style: Optional[str] = None,
+        tone: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Text rewriting - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support rewrite_text task")
+
+    async def summarize_text(
+        self,
+        input_data: Union[str, Any],
+        max_length: Optional[int] = None,
+        style: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Text summarization - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support summarize_text task")
+
+    async def translate_text(
+        self,
+        input_data: Union[str, Any],
+        target_language: str,
+        source_language: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Text translation - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support translate_text task")
+
+    # ==================== Analysis methods ====================
+
+    async def analyze_text(
+        self,
+        input_data: Union[str, Any],
+        analysis_type: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Text analysis - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support analyze_text task")
+
+    async def classify_text(
+        self,
+        input_data: Union[str, Any],
+        categories: Optional[List[str]] = None
+    ) -> Dict[str, Any]:
+        """
+        Text classification - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support classify_text task")
+
+    async def extract_information(
+        self,
+        input_data: Union[str, Any],
+        extract_type: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Information extraction - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support extract_information task")
+
+    async def analyze_sentiment(
+        self,
+        input_data: Union[str, Any]
+    ) -> Dict[str, Any]:
+        """
+        Sentiment analysis - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support analyze_sentiment task")
+
+    # ==================== Coding methods ====================
+
+    async def generate_code(
+        self,
+        input_data: Union[str, Any],
+        language: Optional[str] = None,
+        style: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Code generation - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support generate_code task")
+
+    async def explain_code(
+        self,
+        input_data: Union[str, Any],
+        language: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Code explanation - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support explain_code task")
+
+    async def debug_code(
+        self,
+        input_data: Union[str, Any],
+        language: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Code debugging - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support debug_code task")
+
+    async def refactor_code(
+        self,
+        input_data: Union[str, Any],
+        language: Optional[str] = None,
+        improvements: Optional[List[str]] = None
+    ) -> Dict[str, Any]:
+        """
+        Code refactoring - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support refactor_code task")
+
+    # ==================== Reasoning methods ====================
+
+    async def reason_about(
+        self,
+        input_data: Union[str, Any],
+        reasoning_type: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Reasoning and analysis - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support reason_about task")
+
+    async def solve_problem(
+        self,
+        input_data: Union[str, Any],
+        problem_type: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Problem solving - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support solve_problem task")
+
+    async def create_plan(
+        self,
+        input_data: Union[str, Any],
+        plan_type: Optional[str] = None
+    ) -> Dict[str, Any]:
+        """
+        Plan creation - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support create_plan task")
+
+    # ==================== Tool-calling methods ====================
+
+    async def call_tools(
+        self,
+        input_data: Union[str, Any],
+        available_tools: Optional[List[Any]] = None
+    ) -> Dict[str, Any]:
+        """
+        Tool calling - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support call_tools task")
+
+    async def call_function(
+        self,
+        input_data: Union[str, Any],
+        function_name: str,
+        parameters: Optional[Dict[str, Any]] = None
+    ) -> Dict[str, Any]:
+        """
+        Function calling - optional for providers
+        """
+        raise NotImplementedError(f"{self.__class__.__name__} does not support call_function task")
+
+    # ==================== Tool binding and management ====================

     def bind_tools(self, tools: List[Any], **kwargs) -> 'BaseLLMService':
         """
@@ -99,28 +410,6 @@ class BaseLLMService(BaseService):
         """
         return self.astream(input_data)

-    def invoke(self, input_data: Union[str, List[Dict[str, str]], Any]) -> Union[str, Any]:
-        """
-        Synchronous wrapper for ainvoke
-
-        Args:
-            input_data: Same as ainvoke
-
-        Returns:
-            Model response
-        """
-        import asyncio
-        try:
-            # Try to get current event loop
-            loop = asyncio.get_running_loop()
-            # If we're in an event loop, create a new thread
-            import concurrent.futures
-            with concurrent.futures.ThreadPoolExecutor() as executor:
-                future = executor.submit(asyncio.run, self.ainvoke(input_data))
-                return future.result()
-        except RuntimeError:
-            # No event loop running, create a new one
-            return asyncio.run(self.ainvoke(input_data))

     def _has_bound_tools(self) -> bool:
         """Check if this service has bound tools"""
@@ -154,19 +443,182 @@ class BaseLLMService(BaseService):
         """Get last request usage with cost information"""
         usage = self.get_last_token_usage()

-        # Calculate cost using provider
-        if hasattr(self.provider, 'calculate_cost'):
-            cost = getattr(self.provider, 'calculate_cost')(
-                self.model_name,
-                usage["prompt_tokens"],
-                usage["completion_tokens"]
-            )
-        else:
-            cost = 0.0
+        # Calculate cost using centralized pricing manager
+        cost = self.model_manager.calculate_cost(
+            provider=self.provider_name,
+            model_name=self.model_name,
+            input_tokens=usage.get("prompt_tokens", 0),
+            output_tokens=usage.get("completion_tokens", 0)
+        )

         return {
             **usage,
             "cost_usd": cost,
             "model": self.model_name,
-            "provider": getattr(self.provider, 'name', 'unknown')
+            "provider": self.provider_name
+        }
+
+    async def _track_llm_usage(
+        self,
+        operation: str,
+        input_tokens: Optional[int] = None,
+        output_tokens: Optional[int] = None,
+        metadata: Optional[Dict[str, Any]] = None
+    ) -> float:
+        """
+        Track LLM usage using the unified BaseService billing system
+
+        Returns:
+            Cost in USD
+        """
+        from isa_model.core.types import ServiceType
+
+        await self._track_usage(
+            service_type=ServiceType.LLM,
+            operation=operation,
+            input_tokens=input_tokens,
+            output_tokens=output_tokens,
+            metadata=metadata
+        )
+
+        # Return calculated cost
+        if input_tokens is not None and output_tokens is not None:
+            return self.model_manager.calculate_cost(
+                provider=self.provider_name,
+                model_name=self.model_name,
+                input_tokens=input_tokens,
+                output_tokens=output_tokens
+            )
+        return 0.0
+
+    # ==================== METADATA AND UTILITY METHODS ====================
+
+    def get_supported_tasks(self) -> List[str]:
+        """
+        Get the list of tasks supported by the provider
+
+        Returns:
+            List of supported task names
+        """
+        supported = []
+
+        # Check support for each task category
+        method_task_map = {
+            # Chat
+            'chat': 'chat',
+            'complete_text': 'complete',
+            'instruct': 'instruct',
+            # Text generation
+            'generate_text': 'generate',
+            'rewrite_text': 'rewrite',
+            'summarize_text': 'summarize',
+            'translate_text': 'translate',
+            # Analysis
+            'analyze_text': 'analyze',
+            'classify_text': 'classify',
+            'extract_information': 'extract',
+            'analyze_sentiment': 'sentiment',
+            # Coding
+            'generate_code': 'code',
+            'explain_code': 'explain_code',
+            'debug_code': 'debug_code',
+            'refactor_code': 'refactor_code',
+            # Reasoning
+            'reason_about': 'reason',
+            'solve_problem': 'solve',
+            'create_plan': 'plan',
+            # Tool calling
+            'call_tools': 'tool_call',
+            'call_function': 'function_call'
+        }
+
+        for method_name, task_name in method_task_map.items():
+            if hasattr(self, method_name):
+                # Check whether this is the default implementation or the provider's own
+                try:
+                    import inspect
+                    source = inspect.getsource(getattr(self, method_name))
+                    if 'NotImplementedError' not in source:
+                        supported.append(task_name)
+                except:
+                    # If the source cannot be inspected, assume the task is supported
+                    supported.append(task_name)
+
+        return supported
+
+    def get_supported_languages(self) -> List[str]:
+        """
+        Get the list of supported programming languages - providers should override this
+
+        Returns:
+            List of supported programming languages
+        """
+        return [
+            'python', 'javascript', 'typescript', 'java', 'c++', 'c#',
+            'go', 'rust', 'php', 'ruby', 'swift', 'kotlin', 'scala',
+            'r', 'matlab', 'sql', 'html', 'css', 'bash', 'powershell'
+        ]  # General-purpose language list
+
+    def get_max_context_length(self) -> int:
+        """
+        Get the maximum context length - providers should override this
+
+        Returns:
+            Maximum context length in tokens
+        """
+        return self.max_tokens or 4096  # Default value
+
+    def get_supported_formats(self) -> List[str]:
+        """
+        Get the supported input formats - providers should override this
+
+        Returns:
+            List of supported input formats
+        """
+        return ['text', 'json', 'markdown', 'code']  # Common formats
+
+    def supports_streaming(self) -> bool:
+        """
+        Check whether streaming output is supported
+
+        Returns:
+            True if streaming is supported
+        """
+        return self.streaming
+
+    def supports_function_calling(self) -> bool:
+        """
+        Check whether function calling is supported
+
+        Returns:
+            True if function calling is supported
+        """
+        return hasattr(self, 'call_tools') or hasattr(self, 'call_function')
+
+    def get_temperature_range(self) -> Dict[str, float]:
+        """
+        Get the temperature parameter range
+
+        Returns:
+            Dict with min and max temperature values
+        """
+        return {"min": 0.0, "max": 2.0, "default": self.temperature}
+
+    def get_provider_info(self) -> Dict[str, Any]:
+        """
+        Get provider information
+
+        Returns:
+            Dict containing provider information
+        """
+        return {
+            "provider": self.provider_name,
+            "model": self.model_name,
+            "max_tokens": self.max_tokens,
+            "temperature": self.temperature,
+            "streaming": self.streaming,
+            "supports_tools": self.supports_function_calling(),
+            "supported_tasks": self.get_supported_tasks(),
+            "supported_languages": self.get_supported_languages(),
+            "max_context_length": self.get_max_context_length()
         }
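
For orientation, the dispatch contract introduced in base_llm_service.py above can be summarized with a standalone sketch. This is not package code: DemoDispatcher and EchoLLM are hypothetical stand-ins, and the real class also wires in provider config, tool binding, and billing that are omitted here. The point is only that invoke(input_data, task=...) routes to a per-task coroutine, and any task the provider has not overridden raises NotImplementedError.

import asyncio
from typing import Any, Dict, Optional

class DemoDispatcher:
    """Minimal stand-in mirroring the unified invoke() dispatch shown in the diff."""

    async def invoke(self, input_data: Any, task: Optional[str] = None, **kwargs) -> Dict[str, Any]:
        task = task or "chat"
        handlers = {"chat": self.chat, "summarize": self.summarize_text}
        if task not in handlers:
            raise NotImplementedError(f"{type(self).__name__} does not support task: {task}")
        return await handlers[task](input_data, **kwargs)

    async def chat(self, input_data: Any, **kwargs) -> Dict[str, Any]:
        raise NotImplementedError(f"{type(self).__name__} does not support chat task")

    async def summarize_text(self, input_data: Any, **kwargs) -> Dict[str, Any]:
        raise NotImplementedError(f"{type(self).__name__} does not support summarize_text task")

class EchoLLM(DemoDispatcher):
    # A provider only overrides the task methods it actually supports.
    async def chat(self, input_data: Any, **kwargs) -> Dict[str, Any]:
        return {"text": f"echo: {input_data}", "task": "chat"}

async def main() -> None:
    llm = EchoLLM()
    print(await llm.invoke("hello", task="chat"))       # routed to chat()
    try:
        await llm.invoke("long article ...", task="summarize")
    except NotImplementedError as exc:                   # summarize_text() not overridden
        print(f"unsupported: {exc}")

asyncio.run(main())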