agentrun-sdk 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (128) hide show
  1. agentrun/__init__.py +209 -0
  2. agentrun/agent_runtime/__client_async_template.py +466 -0
  3. agentrun/agent_runtime/__endpoint_async_template.py +345 -0
  4. agentrun/agent_runtime/__init__.py +53 -0
  5. agentrun/agent_runtime/__runtime_async_template.py +477 -0
  6. agentrun/agent_runtime/api/__data_async_template.py +58 -0
  7. agentrun/agent_runtime/api/__init__.py +6 -0
  8. agentrun/agent_runtime/api/control.py +1362 -0
  9. agentrun/agent_runtime/api/data.py +98 -0
  10. agentrun/agent_runtime/client.py +868 -0
  11. agentrun/agent_runtime/endpoint.py +649 -0
  12. agentrun/agent_runtime/model.py +362 -0
  13. agentrun/agent_runtime/runtime.py +904 -0
  14. agentrun/credential/__client_async_template.py +177 -0
  15. agentrun/credential/__credential_async_template.py +216 -0
  16. agentrun/credential/__init__.py +28 -0
  17. agentrun/credential/api/__init__.py +5 -0
  18. agentrun/credential/api/control.py +606 -0
  19. agentrun/credential/client.py +319 -0
  20. agentrun/credential/credential.py +381 -0
  21. agentrun/credential/model.py +248 -0
  22. agentrun/integration/__init__.py +21 -0
  23. agentrun/integration/agentscope/__init__.py +12 -0
  24. agentrun/integration/agentscope/adapter.py +17 -0
  25. agentrun/integration/agentscope/builtin.py +65 -0
  26. agentrun/integration/agentscope/message_adapter.py +185 -0
  27. agentrun/integration/agentscope/model_adapter.py +60 -0
  28. agentrun/integration/agentscope/tool_adapter.py +59 -0
  29. agentrun/integration/builtin/__init__.py +16 -0
  30. agentrun/integration/builtin/model.py +97 -0
  31. agentrun/integration/builtin/sandbox.py +276 -0
  32. agentrun/integration/builtin/toolset.py +47 -0
  33. agentrun/integration/crewai/__init__.py +12 -0
  34. agentrun/integration/crewai/adapter.py +9 -0
  35. agentrun/integration/crewai/builtin.py +65 -0
  36. agentrun/integration/crewai/model_adapter.py +27 -0
  37. agentrun/integration/crewai/tool_adapter.py +26 -0
  38. agentrun/integration/google_adk/__init__.py +12 -0
  39. agentrun/integration/google_adk/adapter.py +15 -0
  40. agentrun/integration/google_adk/builtin.py +65 -0
  41. agentrun/integration/google_adk/message_adapter.py +144 -0
  42. agentrun/integration/google_adk/model_adapter.py +43 -0
  43. agentrun/integration/google_adk/tool_adapter.py +25 -0
  44. agentrun/integration/langchain/__init__.py +9 -0
  45. agentrun/integration/langchain/adapter.py +15 -0
  46. agentrun/integration/langchain/builtin.py +71 -0
  47. agentrun/integration/langchain/message_adapter.py +141 -0
  48. agentrun/integration/langchain/model_adapter.py +37 -0
  49. agentrun/integration/langchain/tool_adapter.py +50 -0
  50. agentrun/integration/langgraph/__init__.py +13 -0
  51. agentrun/integration/langgraph/adapter.py +20 -0
  52. agentrun/integration/langgraph/builtin.py +65 -0
  53. agentrun/integration/pydantic_ai/__init__.py +12 -0
  54. agentrun/integration/pydantic_ai/adapter.py +13 -0
  55. agentrun/integration/pydantic_ai/builtin.py +65 -0
  56. agentrun/integration/pydantic_ai/model_adapter.py +44 -0
  57. agentrun/integration/pydantic_ai/tool_adapter.py +19 -0
  58. agentrun/integration/utils/__init__.py +112 -0
  59. agentrun/integration/utils/adapter.py +167 -0
  60. agentrun/integration/utils/canonical.py +157 -0
  61. agentrun/integration/utils/converter.py +134 -0
  62. agentrun/integration/utils/model.py +107 -0
  63. agentrun/integration/utils/tool.py +1714 -0
  64. agentrun/model/__client_async_template.py +357 -0
  65. agentrun/model/__init__.py +57 -0
  66. agentrun/model/__model_proxy_async_template.py +270 -0
  67. agentrun/model/__model_service_async_template.py +267 -0
  68. agentrun/model/api/__init__.py +6 -0
  69. agentrun/model/api/control.py +1173 -0
  70. agentrun/model/api/data.py +196 -0
  71. agentrun/model/client.py +674 -0
  72. agentrun/model/model.py +218 -0
  73. agentrun/model/model_proxy.py +439 -0
  74. agentrun/model/model_service.py +438 -0
  75. agentrun/sandbox/__browser_sandbox_async_template.py +113 -0
  76. agentrun/sandbox/__client_async_template.py +466 -0
  77. agentrun/sandbox/__code_interpreter_sandbox_async_template.py +466 -0
  78. agentrun/sandbox/__init__.py +54 -0
  79. agentrun/sandbox/__sandbox_async_template.py +398 -0
  80. agentrun/sandbox/__template_async_template.py +150 -0
  81. agentrun/sandbox/api/__browser_data_async_template.py +140 -0
  82. agentrun/sandbox/api/__code_interpreter_data_async_template.py +206 -0
  83. agentrun/sandbox/api/__init__.py +17 -0
  84. agentrun/sandbox/api/__sandbox_data_async_template.py +100 -0
  85. agentrun/sandbox/api/browser_data.py +172 -0
  86. agentrun/sandbox/api/code_interpreter_data.py +396 -0
  87. agentrun/sandbox/api/control.py +1051 -0
  88. agentrun/sandbox/api/playwright_async.py +492 -0
  89. agentrun/sandbox/api/playwright_sync.py +492 -0
  90. agentrun/sandbox/api/sandbox_data.py +140 -0
  91. agentrun/sandbox/browser_sandbox.py +191 -0
  92. agentrun/sandbox/client.py +878 -0
  93. agentrun/sandbox/code_interpreter_sandbox.py +829 -0
  94. agentrun/sandbox/model.py +269 -0
  95. agentrun/sandbox/sandbox.py +737 -0
  96. agentrun/sandbox/template.py +215 -0
  97. agentrun/server/__init__.py +82 -0
  98. agentrun/server/invoker.py +131 -0
  99. agentrun/server/model.py +225 -0
  100. agentrun/server/openai_protocol.py +798 -0
  101. agentrun/server/protocol.py +96 -0
  102. agentrun/server/server.py +192 -0
  103. agentrun/toolset/__client_async_template.py +62 -0
  104. agentrun/toolset/__init__.py +51 -0
  105. agentrun/toolset/__toolset_async_template.py +204 -0
  106. agentrun/toolset/api/__init__.py +17 -0
  107. agentrun/toolset/api/control.py +262 -0
  108. agentrun/toolset/api/mcp.py +100 -0
  109. agentrun/toolset/api/openapi.py +1184 -0
  110. agentrun/toolset/client.py +102 -0
  111. agentrun/toolset/model.py +160 -0
  112. agentrun/toolset/toolset.py +271 -0
  113. agentrun/utils/__data_api_async_template.py +715 -0
  114. agentrun/utils/__init__.py +5 -0
  115. agentrun/utils/__resource_async_template.py +158 -0
  116. agentrun/utils/config.py +258 -0
  117. agentrun/utils/control_api.py +78 -0
  118. agentrun/utils/data_api.py +1110 -0
  119. agentrun/utils/exception.py +149 -0
  120. agentrun/utils/helper.py +34 -0
  121. agentrun/utils/log.py +77 -0
  122. agentrun/utils/model.py +168 -0
  123. agentrun/utils/resource.py +291 -0
  124. agentrun_sdk-0.0.4.dist-info/METADATA +262 -0
  125. agentrun_sdk-0.0.4.dist-info/RECORD +128 -0
  126. agentrun_sdk-0.0.4.dist-info/WHEEL +5 -0
  127. agentrun_sdk-0.0.4.dist-info/licenses/LICENSE +201 -0
  128. agentrun_sdk-0.0.4.dist-info/top_level.txt +1 -0
@@ -0,0 +1,798 @@
1
+ """OpenAI Completions API 协议实现 / OpenAI Completions API protocol implementation
2
+
3
+ 基于 Router 的设计:
4
+ - 协议自己创建 FastAPI Router
5
+ - 定义所有端点和处理逻辑
6
+ - Server 只需挂载 Router"""
7
+
8
+ import json
9
+ import time
10
+ from typing import Any, AsyncIterator, Dict, Iterator, TYPE_CHECKING, Union
11
+
12
+ from fastapi import APIRouter, Request
13
+ from fastapi.responses import JSONResponse, StreamingResponse
14
+
15
+ from .model import (
16
+ AgentRequest,
17
+ AgentResponse,
18
+ AgentResult,
19
+ AgentRunResult,
20
+ AgentStreamResponse,
21
+ AgentStreamResponseChoice,
22
+ AgentStreamResponseDelta,
23
+ Message,
24
+ MessageRole,
25
+ )
26
+ from .protocol import ProtocolHandler
27
+
28
+ if TYPE_CHECKING:
29
+ from .invoker import AgentInvoker
30
+
31
+
32
class OpenAIProtocolHandler(ProtocolHandler):
    """Protocol handler implementing an OpenAI Chat Completions compatible API.

    Reference: https://platform.openai.com/docs/api-reference/chat/create
    """

    def get_prefix(self) -> str:
        """Return the URL prefix this protocol mounts under (OpenAI-style /v1)."""
        return "/openai/v1"
42
+
43
+ def as_fastapi_router(self, agent_invoker: "AgentInvoker") -> APIRouter:
44
+ """创建 OpenAI 协议的 FastAPI Router"""
45
+ router = APIRouter()
46
+
47
+ @router.post("/chat/completions")
48
+ async def chat_completions(request: Request):
49
+ """OpenAI Chat Completions 端点"""
50
+ try:
51
+ # 1. 解析请求
52
+ request_data = await request.json()
53
+ agent_request = await self.parse_request(request_data)
54
+
55
+ # 2. 调用 Agent
56
+ agent_result = await agent_invoker.invoke(agent_request)
57
+
58
+ # 3. 格式化响应
59
+ formatted_result = await self.format_response(
60
+ agent_result, agent_request
61
+ )
62
+
63
+ # 4. 返回响应
64
+ # 自动检测是否为流式响应
65
+ if hasattr(formatted_result, "__aiter__"):
66
+ return StreamingResponse(
67
+ formatted_result, media_type="text/event-stream"
68
+ )
69
+ else:
70
+ return JSONResponse(formatted_result)
71
+
72
+ except ValueError as e:
73
+ return JSONResponse(
74
+ {
75
+ "error": {
76
+ "message": str(e),
77
+ "type": "invalid_request_error",
78
+ }
79
+ },
80
+ status_code=400,
81
+ )
82
+ except Exception as e:
83
+ return JSONResponse(
84
+ {"error": {"message": str(e), "type": "internal_error"}},
85
+ status_code=500,
86
+ )
87
+
88
+ # 可以添加更多端点
89
+ @router.get("/models")
90
+ async def list_models():
91
+ """列出可用模型"""
92
+ return {
93
+ "object": "list",
94
+ "data": [{
95
+ "id": "agentrun-model",
96
+ "object": "model",
97
+ "created": int(time.time()),
98
+ "owned_by": "agentrun",
99
+ }],
100
+ }
101
+
102
+ return router
103
+
104
+ async def parse_request(self, request_data: Dict[str, Any]) -> AgentRequest:
105
+ """解析 OpenAI 格式的请求
106
+
107
+ Args:
108
+ request_data: HTTP 请求体 JSON 数据
109
+
110
+ Returns:
111
+ AgentRequest: 标准化的请求对象
112
+
113
+ Raises:
114
+ ValueError: 请求格式不正确
115
+ """
116
+ # 验证必需字段
117
+ if "messages" not in request_data:
118
+ raise ValueError("Missing required field: messages")
119
+
120
+ # 解析消息列表
121
+ messages = []
122
+ for msg_data in request_data["messages"]:
123
+ if not isinstance(msg_data, dict):
124
+ raise ValueError(f"Invalid message format: {msg_data}")
125
+
126
+ if "role" not in msg_data:
127
+ raise ValueError("Message missing 'role' field")
128
+
129
+ # 转换消息
130
+ try:
131
+ role = MessageRole(msg_data["role"])
132
+ except ValueError as e:
133
+ raise ValueError(
134
+ f"Invalid message role: {msg_data['role']}"
135
+ ) from e
136
+
137
+ messages.append(
138
+ Message(
139
+ role=role,
140
+ content=msg_data.get("content"),
141
+ name=msg_data.get("name"),
142
+ tool_calls=msg_data.get("tool_calls"),
143
+ tool_call_id=msg_data.get("tool_call_id"),
144
+ )
145
+ )
146
+
147
+ # 提取标准参数
148
+ agent_request = AgentRequest(
149
+ messages=messages,
150
+ model=request_data.get("model"),
151
+ stream=request_data.get("stream", False),
152
+ temperature=request_data.get("temperature"),
153
+ top_p=request_data.get("top_p"),
154
+ max_tokens=request_data.get("max_tokens"),
155
+ tools=request_data.get("tools"),
156
+ tool_choice=request_data.get("tool_choice"),
157
+ user=request_data.get("user"),
158
+ )
159
+
160
+ # 保存其他额外参数
161
+ standard_fields = {
162
+ "messages",
163
+ "model",
164
+ "stream",
165
+ "temperature",
166
+ "top_p",
167
+ "max_tokens",
168
+ "tools",
169
+ "tool_choice",
170
+ "user",
171
+ }
172
+ agent_request.extra = {
173
+ k: v for k, v in request_data.items() if k not in standard_fields
174
+ }
175
+
176
+ return agent_request
177
+
178
+ async def format_response(
179
+ self, result: AgentResult, request: AgentRequest
180
+ ) -> Any:
181
+ """格式化响应为 OpenAI 格式
182
+
183
+ Args:
184
+ result: Agent 执行结果,支持:
185
+ - AgentRunResult: 核心数据结构 (推荐)
186
+ - AgentResponse: 完整响应对象
187
+ - ModelResponse: litellm 的 ModelResponse
188
+ - CustomStreamWrapper: litellm 的流式响应
189
+ request: 原始请求
190
+
191
+ Returns:
192
+ 格式化后的响应(dict 或 AsyncIterator)
193
+ """
194
+ # 1. 检测 ModelResponse (来自 Model Service)
195
+ if self._is_model_response(result):
196
+ return self._format_model_response(result, request)
197
+
198
+ # 2. 处理 AgentRunResult
199
+ if isinstance(result, AgentRunResult):
200
+ return await self._format_agent_run_result(result, request)
201
+
202
+ # 3. 自动检测流式响应:
203
+ # - 请求明确指定 stream=true
204
+ # - 或返回值是迭代器/生成器
205
+ is_stream = request.stream or self._is_iterator(result)
206
+
207
+ if is_stream:
208
+ return self._format_stream_response(result, request)
209
+
210
+ # 4. 非流式响应
211
+ # 如果是字符串,包装成 AgentResponse
212
+ if isinstance(result, str):
213
+ result = self._wrap_string_response(result, request)
214
+
215
+ # 如果是 AgentResponse,补充 OpenAI 必需字段并序列化
216
+ if isinstance(result, AgentResponse):
217
+ return self._ensure_openai_format(result, request)
218
+
219
+ raise TypeError(
220
+ "Expected AgentRunResult, AgentResponse, or ModelResponse, "
221
+ f"got {type(result)}"
222
+ )
223
+
224
+ async def _format_agent_run_result(
225
+ self, result: AgentRunResult, request: AgentRequest
226
+ ) -> Union[Dict[str, Any], AsyncIterator[str]]:
227
+ """格式化 AgentRunResult 为 OpenAI 格式
228
+
229
+ AgentRunResult 的 content 可以是:
230
+ - string: 非流式响应
231
+ - Iterator[str] 或 AsyncIterator[str]: 流式响应
232
+
233
+ Args:
234
+ result: AgentRunResult 对象
235
+ request: 原始请求
236
+
237
+ Returns:
238
+ 非流式: OpenAI 格式的字典
239
+ 流式: SSE 格式的异步迭代器
240
+ """
241
+ content = result.content
242
+
243
+ # 检查 content 是否是迭代器
244
+ if self._is_iterator(content):
245
+ # 流式响应
246
+ return self._format_stream_content(content, request)
247
+
248
+ # 非流式响应
249
+ if isinstance(content, str):
250
+ return {
251
+ "id": f"chatcmpl-{int(time.time() * 1000)}",
252
+ "object": "chat.completion",
253
+ "created": int(time.time()),
254
+ "model": request.model or "agentrun-model",
255
+ "choices": [{
256
+ "index": 0,
257
+ "message": {
258
+ "role": "assistant",
259
+ "content": content,
260
+ },
261
+ "finish_reason": "stop",
262
+ }],
263
+ }
264
+
265
+ raise TypeError(
266
+ "AgentRunResult.content must be str or Iterator[str], got"
267
+ f" {type(content)}"
268
+ )
269
+
270
+ def _is_model_response(self, obj: Any) -> bool:
271
+ """检查对象是否是 Model Service 的 ModelResponse
272
+
273
+ ModelResponse 特征:
274
+ - 有 choices 属性
275
+ - 有 usage 属性 (或 created, id 等)
276
+ - 不是 AgentResponse (AgentResponse 也有这些字段)
277
+
278
+ Args:
279
+ obj: 要检查的对象
280
+
281
+ Returns:
282
+ bool: 是否是 ModelResponse
283
+ """
284
+ # 排除已知类型
285
+ if isinstance(obj, (str, AgentResponse, AgentRunResult, dict)):
286
+ return False
287
+
288
+ # 检查 ModelResponse 的特征属性
289
+ # litellm 的 ModelResponse 有 choices 和 model 属性
290
+ return (
291
+ hasattr(obj, "choices")
292
+ and hasattr(obj, "model")
293
+ and (hasattr(obj, "usage") or hasattr(obj, "created"))
294
+ )
295
+
296
+ def _format_model_response(
297
+ self, response: Any, request: AgentRequest
298
+ ) -> Dict[str, Any]:
299
+ """格式化 ModelResponse 为 OpenAI 格式
300
+
301
+ ModelResponse 本身已经是 OpenAI 格式,直接转换为字典即可。
302
+
303
+ Args:
304
+ response: litellm 的 ModelResponse 对象
305
+ request: 原始请求
306
+
307
+ Returns:
308
+ Dict: OpenAI 格式的响应字典
309
+ """
310
+ # 方式 1: 如果有 model_dump 方法 (Pydantic)
311
+ if hasattr(response, "model_dump"):
312
+ return response.model_dump(exclude_none=True)
313
+
314
+ # 方式 2: 如果有 dict 方法
315
+ if hasattr(response, "dict"):
316
+ return response.dict(exclude_none=True)
317
+
318
+ # 方式 3: 手动转换 (litellm ModelResponse)
319
+ result = {
320
+ "id": getattr(
321
+ response, "id", f"chatcmpl-{int(time.time() * 1000)}"
322
+ ),
323
+ "object": getattr(response, "object", "chat.completion"),
324
+ "created": getattr(response, "created", int(time.time())),
325
+ "model": getattr(
326
+ response, "model", request.model or "agentrun-model"
327
+ ),
328
+ "choices": [],
329
+ }
330
+
331
+ # 转换 choices
332
+ if hasattr(response, "choices"):
333
+ for choice in response.choices:
334
+ choice_dict = {
335
+ "index": getattr(choice, "index", 0),
336
+ "finish_reason": getattr(choice, "finish_reason", None),
337
+ }
338
+
339
+ # 转换 message
340
+ if hasattr(choice, "message"):
341
+ msg = choice.message
342
+ choice_dict["message"] = {
343
+ "role": getattr(msg, "role", "assistant"),
344
+ "content": getattr(msg, "content", None),
345
+ }
346
+ # 可选字段
347
+ if hasattr(msg, "tool_calls") and msg.tool_calls:
348
+ choice_dict["message"]["tool_calls"] = msg.tool_calls
349
+
350
+ result["choices"].append(choice_dict)
351
+
352
+ # 转换 usage
353
+ if hasattr(response, "usage") and response.usage:
354
+ usage = response.usage
355
+ result["usage"] = {
356
+ "prompt_tokens": getattr(usage, "prompt_tokens", 0),
357
+ "completion_tokens": getattr(usage, "completion_tokens", 0),
358
+ "total_tokens": getattr(usage, "total_tokens", 0),
359
+ }
360
+
361
+ return result
362
+
363
+ def _is_iterator(self, obj: Any) -> bool:
364
+ """检查对象是否是迭代器
365
+
366
+ Args:
367
+ obj: 要检查的对象
368
+
369
+ Returns:
370
+ bool: 是否是迭代器
371
+ """
372
+ # 检查是否是迭代器或生成器
373
+ return (
374
+ hasattr(obj, "__iter__") and not isinstance(obj, (str, bytes, dict))
375
+ ) or hasattr(obj, "__aiter__")
376
+
377
+ async def _format_stream_content(
378
+ self,
379
+ content: Union[Iterator[str], AsyncIterator[str]],
380
+ request: AgentRequest,
381
+ ) -> AsyncIterator[str]:
382
+ """格式化流式 content 为 OpenAI SSE 格式
383
+
384
+ 将字符串迭代器转换为 OpenAI 流式响应格式。
385
+
386
+ Args:
387
+ content: 字符串迭代器 (同步或异步)
388
+ request: 原始请求
389
+
390
+ Yields:
391
+ SSE 格式的数据行
392
+ """
393
+ response_id = f"chatcmpl-{int(time.time() * 1000)}"
394
+ created = int(time.time())
395
+ model = request.model or "agentrun-model"
396
+
397
+ # 发送第一个 chunk (包含 role)
398
+ first_chunk = {
399
+ "id": response_id,
400
+ "object": "chat.completion.chunk",
401
+ "created": created,
402
+ "model": model,
403
+ "choices": [{
404
+ "index": 0,
405
+ "delta": {"role": "assistant"},
406
+ "finish_reason": None,
407
+ }],
408
+ }
409
+ yield f"data: {json.dumps(first_chunk, ensure_ascii=False)}\n\n"
410
+
411
+ # 检查是否是异步迭代器
412
+ if hasattr(content, "__aiter__"):
413
+ async for chunk in content: # type: ignore
414
+ if chunk: # 跳过空字符串
415
+ data = {
416
+ "id": response_id,
417
+ "object": "chat.completion.chunk",
418
+ "created": created,
419
+ "model": model,
420
+ "choices": [{
421
+ "index": 0,
422
+ "delta": {"content": chunk},
423
+ "finish_reason": None,
424
+ }],
425
+ }
426
+ yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
427
+ else:
428
+ # 同步迭代器
429
+ for chunk in content: # type: ignore
430
+ if chunk:
431
+ data = {
432
+ "id": response_id,
433
+ "object": "chat.completion.chunk",
434
+ "created": created,
435
+ "model": model,
436
+ "choices": [{
437
+ "index": 0,
438
+ "delta": {"content": chunk},
439
+ "finish_reason": None,
440
+ }],
441
+ }
442
+ yield f"data: {json.dumps(data, ensure_ascii=False)}\n\n"
443
+
444
+ # 发送结束 chunk
445
+ final_chunk = {
446
+ "id": response_id,
447
+ "object": "chat.completion.chunk",
448
+ "created": created,
449
+ "model": model,
450
+ "choices": [{
451
+ "index": 0,
452
+ "delta": {},
453
+ "finish_reason": "stop",
454
+ }],
455
+ }
456
+ yield f"data: {json.dumps(final_chunk, ensure_ascii=False)}\n\n"
457
+
458
+ # 发送结束标记
459
+ yield "data: [DONE]\n\n"
460
+
461
+ def _wrap_string_response(
462
+ self, content: str, request: AgentRequest
463
+ ) -> AgentResponse:
464
+ """将字符串包装成 AgentResponse
465
+
466
+ Args:
467
+ content: 响应内容字符串
468
+ request: 原始请求
469
+
470
+ Returns:
471
+ AgentResponse: 包装后的响应对象
472
+ """
473
+ return AgentResponse(content=content)
474
+
475
+ def _ensure_openai_format(
476
+ self, response: AgentResponse, request: AgentRequest
477
+ ) -> Dict[str, Any]:
478
+ """确保 AgentResponse 符合 OpenAI 格式
479
+
480
+ 如果用户只填充了 content,自动补充 OpenAI 必需字段。
481
+ 如果用户已填充完整字段,直接使用。
482
+
483
+ Args:
484
+ response: Agent 返回的响应对象
485
+ request: 原始请求
486
+
487
+ Returns:
488
+ Dict: OpenAI 格式的响应字典
489
+ """
490
+ # 如果用户只提供了 content,构造完整的 OpenAI 格式
491
+ if response.content and not response.choices:
492
+ return {
493
+ "id": response.id or f"chatcmpl-{int(time.time() * 1000)}",
494
+ "object": response.object or "chat.completion",
495
+ "created": response.created or int(time.time()),
496
+ "model": response.model or request.model or "agentrun-model",
497
+ "choices": [{
498
+ "index": 0,
499
+ "message": {
500
+ "role": "assistant",
501
+ "content": response.content,
502
+ },
503
+ "finish_reason": "stop",
504
+ }],
505
+ "usage": (
506
+ json.loads(response.usage.model_dump_json())
507
+ if response.usage
508
+ else None
509
+ ),
510
+ }
511
+
512
+ # 用户提供了完整字段,使用 JSON 序列化避免对象嵌套问题
513
+ json_str = response.model_dump_json(exclude_none=True)
514
+ result = json.loads(json_str)
515
+
516
+ # 确保必需字段存在
517
+ if "id" not in result:
518
+ result["id"] = f"chatcmpl-{int(time.time() * 1000)}"
519
+ if "object" not in result:
520
+ result["object"] = "chat.completion"
521
+ if "created" not in result:
522
+ result["created"] = int(time.time())
523
+ if "model" not in result:
524
+ result["model"] = request.model or "agentrun-model"
525
+
526
+ # 移除 content 和 extra (OpenAI 格式中不需要)
527
+ result.pop("content", None)
528
+ result.pop("extra", None)
529
+
530
+ return result
531
+
532
+ def _is_custom_stream_wrapper(self, obj: Any) -> bool:
533
+ """检查是否是 Model Service 的 CustomStreamWrapper"""
534
+ # CustomStreamWrapper 的特征
535
+ return (
536
+ hasattr(obj, "__aiter__")
537
+ and type(obj).__name__ == "CustomStreamWrapper"
538
+ )
539
+
540
+ async def _format_model_stream(
541
+ self, stream_wrapper: Any, request: AgentRequest
542
+ ) -> AsyncIterator[str]:
543
+ """格式化 Model Service 的流式响应
544
+
545
+ CustomStreamWrapper 返回的 chunk 已经是完整的 OpenAI 格式对象。
546
+ """
547
+ async for chunk in stream_wrapper:
548
+ # chunk 是 litellm 的 ModelResponse 或字典
549
+ if isinstance(chunk, dict):
550
+ # 已经是字典,直接格式化为 SSE
551
+ yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
552
+ elif hasattr(chunk, "model_dump"):
553
+ # Pydantic 对象
554
+ chunk_dict = chunk.model_dump(exclude_none=True)
555
+ yield f"data: {json.dumps(chunk_dict, ensure_ascii=False)}\n\n"
556
+ elif hasattr(chunk, "dict"):
557
+ # 旧版 Pydantic
558
+ chunk_dict = chunk.dict(exclude_none=True)
559
+ yield f"data: {json.dumps(chunk_dict, ensure_ascii=False)}\n\n"
560
+ else:
561
+ # 手动转换对象为字典
562
+ chunk_dict = {
563
+ "id": getattr(
564
+ chunk, "id", f"chatcmpl-{int(time.time() * 1000)}"
565
+ ),
566
+ "object": getattr(chunk, "object", "chat.completion.chunk"),
567
+ "created": getattr(chunk, "created", int(time.time())),
568
+ "model": getattr(
569
+ chunk, "model", request.model or "agentrun-model"
570
+ ),
571
+ "choices": [],
572
+ }
573
+
574
+ if hasattr(chunk, "choices"):
575
+ for choice in chunk.choices:
576
+ choice_dict = {
577
+ "index": getattr(choice, "index", 0),
578
+ "finish_reason": getattr(
579
+ choice, "finish_reason", None
580
+ ),
581
+ }
582
+
583
+ if hasattr(choice, "delta"):
584
+ delta = choice.delta
585
+ delta_dict = {}
586
+ if hasattr(delta, "role") and delta.role:
587
+ delta_dict["role"] = delta.role
588
+ if hasattr(delta, "content") and delta.content:
589
+ delta_dict["content"] = delta.content
590
+ if (
591
+ hasattr(delta, "tool_calls")
592
+ and delta.tool_calls
593
+ ):
594
+ delta_dict["tool_calls"] = delta.tool_calls
595
+ choice_dict["delta"] = delta_dict
596
+
597
+ chunk_dict["choices"].append(choice_dict)
598
+
599
+ yield f"data: {json.dumps(chunk_dict, ensure_ascii=False)}\n\n"
600
+
601
+ # 发送结束标记
602
+ yield "data: [DONE]\n\n"
603
+
604
    async def _format_stream_response(
        self, result: AgentResult, request: AgentRequest
    ) -> AsyncIterator[str]:
        """Format a streaming result as OpenAI SSE lines.

        Args:
            result: Stream iterator; supported forms are:
                - Iterator[str]/AsyncIterator[str]: streamed text chunks
                - Iterator[AgentStreamResponse]: streamed response objects
                - CustomStreamWrapper: model-service streaming response
            request: The original request.

        Yields:
            SSE-formatted data lines.

        Raises:
            TypeError: If *result* is neither a sync nor an async iterator.
        """
        # CustomStreamWrapper (model-service stream) is relayed as-is.
        if self._is_custom_stream_wrapper(result):
            async for chunk in self._format_model_stream(result, request):
                yield chunk
            return

        response_id = f"chatcmpl-{int(time.time() * 1000)}"
        created = int(time.time())
        model = request.model or "agentrun-model"

        # Async-iterator branch.
        if hasattr(result, "__aiter__"):
            first_chunk = True
            async for chunk in result:  # type: ignore
                # Plain strings get wrapped into AgentStreamResponse objects.
                if isinstance(chunk, str):
                    if first_chunk:
                        # First chunk: emit the assistant role delta.
                        yield self._format_sse_chunk(
                            AgentStreamResponse(
                                id=response_id,
                                created=created,
                                model=model,
                                choices=[
                                    AgentStreamResponseChoice(
                                        index=0,
                                        delta=AgentStreamResponseDelta(
                                            role=MessageRole.ASSISTANT,
                                        ),
                                        finish_reason=None,
                                    )
                                ],
                            )
                        )
                        first_chunk = False

                    # Emit the content chunk.
                    if chunk:  # skip empty strings
                        yield self._format_sse_chunk(
                            AgentStreamResponse(
                                id=response_id,
                                created=created,
                                model=model,
                                choices=[
                                    AgentStreamResponseChoice(
                                        index=0,
                                        delta=AgentStreamResponseDelta(
                                            content=chunk
                                        ),
                                        finish_reason=None,
                                    )
                                ],
                            )
                        )

                # AgentStreamResponse objects are serialized directly.
                elif isinstance(chunk, AgentStreamResponse):
                    yield self._format_sse_chunk(chunk)

            # Emit the terminating chunk (finish_reason="stop").
            yield self._format_sse_chunk(
                AgentStreamResponse(
                    id=response_id,
                    created=created,
                    model=model,
                    choices=[
                        AgentStreamResponseChoice(
                            index=0,
                            delta=AgentStreamResponseDelta(),
                            finish_reason="stop",
                        )
                    ],
                )
            )
            # Emit the end-of-stream marker.
            yield "data: [DONE]\n\n"

        # Sync-iterator branch (mirrors the async branch above).
        elif hasattr(result, "__iter__"):
            first_chunk = True
            for chunk in result:  # type: ignore
                # Plain strings get wrapped into AgentStreamResponse objects.
                if isinstance(chunk, str):
                    if first_chunk:
                        yield self._format_sse_chunk(
                            AgentStreamResponse(
                                id=response_id,
                                created=created,
                                model=model,
                                choices=[
                                    AgentStreamResponseChoice(
                                        index=0,
                                        delta=AgentStreamResponseDelta(
                                            role=MessageRole.ASSISTANT,
                                        ),
                                        finish_reason=None,
                                    )
                                ],
                            )
                        )
                        first_chunk = False

                    if chunk:
                        yield self._format_sse_chunk(
                            AgentStreamResponse(
                                id=response_id,
                                created=created,
                                model=model,
                                choices=[
                                    AgentStreamResponseChoice(
                                        index=0,
                                        delta=AgentStreamResponseDelta(
                                            content=chunk
                                        ),
                                        finish_reason=None,
                                    )
                                ],
                            )
                        )

                elif isinstance(chunk, AgentStreamResponse):
                    yield self._format_sse_chunk(chunk)

            # Emit the terminating chunk (finish_reason="stop").
            yield self._format_sse_chunk(
                AgentStreamResponse(
                    id=response_id,
                    created=created,
                    model=model,
                    choices=[
                        AgentStreamResponseChoice(
                            index=0,
                            delta=AgentStreamResponseDelta(),
                            finish_reason="stop",
                        )
                    ],
                )
            )
            yield "data: [DONE]\n\n"

        else:
            raise TypeError(
                "Expected Iterator or AsyncIterator for stream response, "
                f"got {type(result)}"
            )
764
+
765
+ def _format_sse_chunk(self, chunk: AgentStreamResponse) -> str:
766
+ """格式化单个 SSE chunk
767
+
768
+ Args:
769
+ chunk: AgentStreamResponse 对象
770
+
771
+ Returns:
772
+ SSE 格式的字符串
773
+ """
774
+ # 使用 Pydantic 的 JSON 序列化,自动处理所有嵌套对象
775
+ json_str = chunk.model_dump_json(exclude_none=True)
776
+ json_data = json.loads(json_str)
777
+
778
+ # 如果用户只提供了 content,转换为 OpenAI 格式
779
+ if "content" in json_data and "choices" not in json_data:
780
+ json_data = {
781
+ "id": json_data.get(
782
+ "id", f"chatcmpl-{int(time.time() * 1000)}"
783
+ ),
784
+ "object": json_data.get("object", "chat.completion.chunk"),
785
+ "created": json_data.get("created", int(time.time())),
786
+ "model": json_data.get("model", "agentrun-model"),
787
+ "choices": [{
788
+ "index": 0,
789
+ "delta": {"content": json_data["content"]},
790
+ "finish_reason": None,
791
+ }],
792
+ }
793
+ else:
794
+ # 移除不属于 OpenAI 格式的字段
795
+ json_data.pop("content", None)
796
+ json_data.pop("extra", None)
797
+
798
+ return f"data: {json.dumps(json_data, ensure_ascii=False)}\n\n"