isa-model 0.3.0__py3-none-any.whl → 0.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. isa_model/core/model_manager.py +69 -4
  2. isa_model/inference/ai_factory.py +335 -46
  3. isa_model/inference/billing_tracker.py +406 -0
  4. isa_model/inference/providers/base_provider.py +51 -4
  5. isa_model/inference/providers/ollama_provider.py +37 -18
  6. isa_model/inference/providers/openai_provider.py +65 -36
  7. isa_model/inference/providers/replicate_provider.py +42 -30
  8. isa_model/inference/services/audio/base_stt_service.py +21 -2
  9. isa_model/inference/services/audio/openai_realtime_service.py +353 -0
  10. isa_model/inference/services/audio/openai_stt_service.py +252 -0
  11. isa_model/inference/services/audio/openai_tts_service.py +48 -9
  12. isa_model/inference/services/audio/replicate_tts_service.py +239 -0
  13. isa_model/inference/services/base_service.py +36 -1
  14. isa_model/inference/services/embedding/openai_embed_service.py +223 -0
  15. isa_model/inference/services/llm/base_llm_service.py +88 -192
  16. isa_model/inference/services/llm/llm_adapter.py +459 -0
  17. isa_model/inference/services/llm/ollama_llm_service.py +111 -185
  18. isa_model/inference/services/llm/openai_llm_service.py +115 -360
  19. isa_model/inference/services/vision/helpers/image_utils.py +4 -3
  20. isa_model/inference/services/vision/ollama_vision_service.py +11 -3
  21. isa_model/inference/services/vision/openai_vision_service.py +275 -41
  22. isa_model/inference/services/vision/replicate_image_gen_service.py +233 -205
  23. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/METADATA +1 -1
  24. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/RECORD +26 -21
  25. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/WHEEL +0 -0
  26. {isa_model-0.3.0.dist-info → isa_model-0.3.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,459 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ 统一适配器架构 - 支持多框架集成
4
+ 将不同框架的消息、工具、上下文统一适配到 OpenAI 格式
5
+ """
6
+
7
+ import json
8
+ import inspect
9
+ from typing import Dict, Any, List, Union, Optional, Callable, Protocol
10
+ from abc import ABC, abstractmethod
11
+
12
+
13
+ # ============= Adapter protocol definitions =============
14
+
15
class MessageAdapter(Protocol):
    """Structural protocol for message adapters.

    A message adapter translates some framework-specific message format
    to and from the OpenAI chat-message dict format.
    """

    def can_handle(self, input_data: Any) -> bool:
        """Return True if this adapter can process the given input format."""
        ...

    def to_openai_format(self, input_data: Any) -> List[Dict[str, str]]:
        """Convert the input into a list of OpenAI-style message dicts."""
        ...

    def from_openai_format(self, response: str, original_input: Any) -> Any:
        """Convert an OpenAI-format response back to the original format."""
        ...
29
+
30
+
31
class ToolAdapter(Protocol):
    """Structural protocol for tool adapters.

    A tool adapter converts a framework-specific tool object into an
    OpenAI function-calling schema and knows how to execute that tool.
    """

    def can_handle(self, tool: Any) -> bool:
        """Return True if this adapter can process the given tool."""
        ...

    def to_openai_schema(self, tool: Any) -> Dict[str, Any]:
        """Convert the tool into an OpenAI function-calling schema dict."""
        ...

    async def execute_tool(self, tool: Any, arguments: Dict[str, Any]) -> Any:
        """Execute the tool with the given arguments and return its result."""
        ...
45
+
46
+
47
class ContextAdapter(Protocol):
    """Structural protocol for adapters that supply extra context for a query."""

    # Human-readable adapter name; used in warning messages when a
    # context lookup fails.
    adapter_name: str

    def can_provide_context(self, query: str, context_type: str) -> bool:
        """Return True if this adapter can supply context for the query."""
        ...

    async def get_relevant_context(self, query: str, limit: int = 5) -> str:
        """Return context relevant to the query, bounded by *limit*."""
        ...
59
+
60
+
61
+ # ============= LangChain adapter implementations =============
62
+
63
class LangChainMessageAdapter:
    """Adapts LangChain message objects to and from the OpenAI chat format."""

    # LangChain message ``type`` -> OpenAI ``role``; unknown types map to "user".
    _ROLE_MAP = {"system": "system", "human": "user", "ai": "assistant", "tool": "tool"}

    def __init__(self):
        self.adapter_name = "langchain"
        self.priority = 8

    def can_handle(self, input_data: Any) -> bool:
        """LangChain messages are recognized by their ``type`` and ``content``
        attributes (checked on the first element for lists)."""
        if isinstance(input_data, list) and input_data:
            return hasattr(input_data[0], 'type') and hasattr(input_data[0], 'content')
        return hasattr(input_data, 'type') and hasattr(input_data, 'content')

    def to_openai_format(self, input_data: Any) -> List[Dict[str, str]]:
        """Convert LangChain message(s) into OpenAI chat-message dicts.

        Objects without ``type``/``content`` attributes are silently skipped.
        """
        if not isinstance(input_data, list):
            input_data = [input_data]

        converted_messages = []
        for msg in input_data:
            if hasattr(msg, 'type') and hasattr(msg, 'content'):
                msg_dict = {"content": str(msg.content)}
                msg_dict["role"] = self._ROLE_MAP.get(msg.type, "user")
                # Tool results must carry the id of the call they answer.
                if msg.type == "tool" and hasattr(msg, 'tool_call_id'):
                    msg_dict["tool_call_id"] = msg.tool_call_id
                converted_messages.append(msg_dict)

        return converted_messages

    @staticmethod
    def _extract_tool_calls(response: Any) -> List[Dict[str, Any]]:
        """Convert OpenAI tool_call objects into LangChain-style dicts.

        Shared by both branches of ``from_openai_format`` (previously the
        same loop was duplicated in each). May raise ``json.JSONDecodeError``
        if a call carries malformed JSON arguments, as before.
        """
        return [
            {
                "name": tc.function.name,
                "args": json.loads(tc.function.arguments),
                "id": tc.id,
            }
            for tc in response.tool_calls
        ]

    def from_openai_format(self, response: Union[str, Any], original_input: Any) -> Any:
        """Convert an OpenAI response back into a LangChain ``AIMessage``.

        Falls back to a minimal stand-in class with the same attributes
        (``content``, ``type``, ``tool_calls``) when langchain-core is not
        installed.
        """
        has_tool_calls = hasattr(response, 'tool_calls') and response.tool_calls

        try:
            from langchain_core.messages import AIMessage

            if has_tool_calls:
                return AIMessage(
                    content=response.content or "",
                    tool_calls=self._extract_tool_calls(response)
                )
            content = response if isinstance(response, str) else getattr(response, 'content', str(response))
            return AIMessage(content=content)

        except ImportError:
            # Fallback stand-in mirroring the AIMessage attributes used downstream.
            class SimpleAIMessage:
                def __init__(self, content, tool_calls=None):
                    self.content = content
                    self.type = "ai"
                    self.tool_calls = tool_calls or []

            if has_tool_calls:
                return SimpleAIMessage(response.content or "", self._extract_tool_calls(response))
            content = response if isinstance(response, str) else getattr(response, 'content', str(response))
            return SimpleAIMessage(content)
153
+
154
class LangChainToolAdapter:
    """Adapts LangChain tools (name/description/args_schema) to OpenAI
    function-calling schemas and executes them."""

    def __init__(self):
        self.adapter_name = "langchain_tool"
        self.priority = 8

    def can_handle(self, tool: Any) -> bool:
        """LangChain tools expose ``name``, ``description`` and ``args_schema``."""
        return (hasattr(tool, 'name') and hasattr(tool, 'description')
                and hasattr(tool, 'args_schema'))

    @staticmethod
    def _field_is_required(field_info: Any) -> bool:
        """Return True when a pydantic field has no default.

        Fix: pydantic v2 marks required fields with ``PydanticUndefined``
        rather than ``Ellipsis``, so the old ``default == ...`` test never
        fired there. Prefer ``FieldInfo.is_required()`` when available and
        fall back to the legacy Ellipsis check otherwise.
        """
        is_required = getattr(field_info, 'is_required', None)
        if callable(is_required):
            try:
                return bool(is_required())
            except TypeError:
                pass
        return getattr(field_info, 'default', None) is ...

    def to_openai_schema(self, tool: Any) -> Dict[str, Any]:
        """Build an OpenAI tool schema from a LangChain tool's args_schema.

        Only simple scalar annotations (str/int/float/bool) are mapped;
        anything else degrades to "string".
        """
        properties: Dict[str, Any] = {}
        required: List[str] = []

        if tool.args_schema and hasattr(tool.args_schema, 'model_fields'):
            for field_name, field_info in tool.args_schema.model_fields.items():
                field_type = getattr(field_info, 'annotation', str)

                if field_type == str:
                    prop_type = "string"
                elif field_type == int:
                    prop_type = "integer"
                elif field_type == float:
                    prop_type = "number"
                elif field_type == bool:
                    prop_type = "boolean"
                else:
                    prop_type = "string"

                properties[field_name] = {"type": prop_type}

                if self._field_is_required(field_info):
                    required.append(field_name)

        return {
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": required
                }
            }
        }

    async def execute_tool(self, tool: Any, arguments: Dict[str, Any]) -> Any:
        """Invoke the LangChain tool, awaiting async results.

        Single-parameter tools receive the bare value rather than a dict.
        Errors are returned as strings, never raised.
        """
        try:
            if len(arguments) == 1:
                input_value = list(arguments.values())[0]
                result = tool.invoke(input_value)
            else:
                result = tool.invoke(arguments)

            if hasattr(result, '__await__'):
                result = await result

            return result
        except Exception as e:
            return f"Error executing LangChain tool {tool.name}: {str(e)}"
219
+
220
+
221
+ # ============= Python function adapter =============
222
+
223
class PythonFunctionAdapter:
    """Adapts plain Python callables to the OpenAI function-calling format."""

    def __init__(self):
        self.adapter_name = "python_function"
        self.priority = 5

    def can_handle(self, tool: Any) -> bool:
        """Handle any named callable (plain functions, lambdas, methods)."""
        return callable(tool) and hasattr(tool, '__name__')

    @staticmethod
    def _json_type(annotation: Any) -> str:
        """Map a Python annotation (or its string form) to a JSON-schema type,
        defaulting to "string" for anything unrecognized."""
        mapping = {
            str: "string", "str": "string",
            int: "integer", "int": "integer",
            float: "number", "float": "number",
            bool: "boolean", "bool": "boolean",
            list: "array", "list": "array",
            dict: "object", "dict": "object",
        }
        try:
            return mapping.get(annotation, "string")
        except TypeError:
            # Unhashable annotation objects cannot be looked up in a dict.
            return "string"

    def to_openai_schema(self, tool: Any) -> Dict[str, Any]:
        """Build an OpenAI tool schema from the function's signature.

        Parameters without defaults are marked required; the function's
        docstring becomes the tool description.
        """
        func = tool  # the tool *is* the function
        sig = inspect.signature(func)
        properties: Dict[str, Any] = {}
        required: List[str] = []

        # Resolve type hints, gracefully falling back to the raw signature
        # annotations (e.g. on unresolvable forward references).
        try:
            from typing import get_type_hints
            type_hints = get_type_hints(func)
        except (NameError, AttributeError, TypeError):
            type_hints = {
                name: param.annotation
                for name, param in sig.parameters.items()
                if param.annotation is not inspect.Parameter.empty
            }

        for param_name, param in sig.parameters.items():
            # Fix: *args / **kwargs cannot be expressed as named JSON
            # properties and previously leaked into the schema as bogus
            # required "string" parameters — skip them.
            if param.kind in (inspect.Parameter.VAR_POSITIONAL,
                              inspect.Parameter.VAR_KEYWORD):
                continue

            properties[param_name] = {"type": self._json_type(type_hints.get(param_name, str))}

            if param.default is inspect.Parameter.empty:
                required.append(param_name)

        return {
            "type": "function",
            "function": {
                "name": func.__name__,
                "description": func.__doc__ or f"Function {func.__name__}",
                "parameters": {
                    "type": "object",
                    "properties": properties,
                    "required": required
                }
            }
        }

    async def execute_tool(self, tool: Any, arguments: Dict[str, Any]) -> Any:
        """Call the function with keyword arguments, awaiting the result if
        it is awaitable. Errors are returned as strings, never raised."""
        func = tool  # the tool *is* the function
        try:
            result = func(**arguments)
            if hasattr(result, '__await__'):
                result = await result
            return result
        except Exception as e:
            return f"Error executing function {func.__name__}: {str(e)}"
298
+
299
+
300
+ # ============= Standard format adapter =============
301
+
302
class StandardMessageAdapter:
    """Catch-all adapter for plain strings, dict lists and arbitrary objects."""

    def __init__(self):
        self.adapter_name = "standard"
        # Lowest priority: this adapter is the universal fallback.
        self.priority = 1

    def can_handle(self, input_data: Any) -> bool:
        """Accept every input — fallback behavior by design."""
        return True

    def to_openai_format(self, input_data: Any) -> List[Dict[str, str]]:
        """Normalize the input into OpenAI chat-message dicts."""
        if isinstance(input_data, list):
            if not input_data:
                return [{"role": "user", "content": ""}]
            if isinstance(input_data[0], dict):
                # Assume the list is already OpenAI-shaped; pass it through.
                return input_data
            # Treat a plain sequence as alternating user/assistant turns.
            turn_roles = ("user", "assistant")
            return [
                {"role": turn_roles[idx % 2], "content": str(item)}
                for idx, item in enumerate(input_data)
            ]

        text = input_data if isinstance(input_data, str) else str(input_data)
        return [{"role": "user", "content": text}]

    def from_openai_format(self, response: Union[str, Any], original_input: Any) -> Any:
        """Unwrap a response to plain text, keeping tool-call messages intact."""
        if getattr(response, 'tool_calls', None):
            # Preserve the message object so callers can access tool_calls
            # (works for both OpenAI and Ollama message objects).
            return response
        if isinstance(response, str):
            return response
        return getattr(response, 'content', str(response))
344
+
345
+
346
+ # ============= Adapter manager =============
347
+
348
class AdapterManager:
    """Central registry that routes messages, tools and context requests to
    the first registered adapter able to handle them."""

    def __init__(self):
        self.message_adapters: List[MessageAdapter] = []
        self.tool_adapters: List[ToolAdapter] = []
        self.context_adapters: List[ContextAdapter] = []

        # Install the built-in adapters.
        self._register_default_adapters()

    def _register_default_adapters(self):
        """Register the built-in adapters, highest priority first."""
        # Message adapters are consulted in order; the standard adapter
        # accepts everything and therefore acts as the fallback.
        self.message_adapters = [
            LangChainMessageAdapter(),
            StandardMessageAdapter()  # fallback adapter
        ]

        # Tool adapters.
        self.tool_adapters = [
            LangChainToolAdapter(),
            PythonFunctionAdapter()
        ]

    @staticmethod
    def _insert_by_priority(adapters: List[Any], adapter: Any) -> None:
        """Insert *adapter* before the first entry with a strictly lower
        priority (missing priorities default to 5); append otherwise.

        Fix: this logic was duplicated verbatim for the message and tool
        adapter lists in ``register_custom_adapter``.
        """
        priority = getattr(adapter, 'priority', 5)
        for i, existing in enumerate(adapters):
            if getattr(existing, 'priority', 5) < priority:
                adapters.insert(i, adapter)
                return
        adapters.append(adapter)

    def register_custom_adapter(self, adapter, adapter_type: str):
        """Register a user-supplied adapter.

        *adapter_type* is one of "message", "tool" or "context". Message and
        tool adapters are kept sorted by priority; context adapters are
        simply appended. Unknown types are silently ignored.
        """
        if adapter_type == "message":
            self._insert_by_priority(self.message_adapters, adapter)
        elif adapter_type == "tool":
            self._insert_by_priority(self.tool_adapters, adapter)
        elif adapter_type == "context":
            self.context_adapters.append(adapter)

    def convert_messages(self, input_data: Any) -> List[Dict[str, str]]:
        """Convert any supported input into OpenAI chat messages."""
        for adapter in self.message_adapters:
            if adapter.can_handle(input_data):
                return adapter.to_openai_format(input_data)

        # Unreachable in practice: the fallback adapter handles everything.
        return [{"role": "user", "content": str(input_data)}]

    def format_response(self, response: str, original_input: Any) -> Any:
        """Convert a raw response back into the caller's message format,
        selected by which adapter recognizes the *original* input."""
        for adapter in self.message_adapters:
            if adapter.can_handle(original_input):
                return adapter.from_openai_format(response, original_input)

        return response

    async def convert_tools_to_schemas(self, tools: List[Any]) -> tuple[List[Dict[str, Any]], Dict[str, tuple]]:
        """Convert tools into OpenAI schemas.

        Returns ``(schemas, tool_mappings)`` where *tool_mappings* maps each
        tool name to its ``(tool, adapter)`` pair for later execution.
        Tools no adapter can handle are silently skipped.
        """
        schemas = []
        tool_mappings: Dict[str, tuple] = {}

        for tool in tools:
            for adapter in self.tool_adapters:
                if adapter.can_handle(tool):
                    schema = adapter.to_openai_schema(tool)
                    schemas.append(schema)

                    # Remember which adapter owns this tool name.
                    tool_mappings[schema["function"]["name"]] = (tool, adapter)
                    break

        return schemas, tool_mappings

    async def execute_tool(self, tool_name: str, arguments: Dict[str, Any],
                           tool_mappings: Dict[str, tuple]) -> Any:
        """Execute a previously mapped tool; returns an error string (not an
        exception) when the tool name is unknown."""
        if tool_name in tool_mappings:
            tool, adapter = tool_mappings[tool_name]
            return await adapter.execute_tool(tool, arguments)
        else:
            return f"Error: Tool {tool_name} not found"

    async def get_context(self, query: str, context_type: str = "all",
                          limit: int = 5) -> str:
        """Collect context from every willing context adapter and join the
        pieces with blank lines. Individual adapter failures are reported on
        stdout and skipped — best-effort by design."""
        context_parts = []

        for adapter in self.context_adapters:
            if adapter.can_provide_context(query, context_type):
                try:
                    context = await adapter.get_relevant_context(query, limit)
                    if context:
                        context_parts.append(context)
                except Exception as e:
                    print(f"Warning: Context adapter {adapter.adapter_name} failed: {e}")

        return "\n\n".join(context_parts)