@agentunion/kite 1.4.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (235)
  1. package/CHANGELOG.md +102 -0
  2. package/cli.js +44 -5
  3. package/core/dependency_checker.py +250 -0
  4. package/core/env_checker.py +490 -0
  5. package/dependencies_lock.json +128 -0
  6. package/extensions/agents/assistant/server.py +33 -17
  7. package/extensions/channels/acp_channel/server.py +33 -17
  8. package/extensions/services/backup/entry.py +23 -16
  9. package/extensions/services/evol/auth_manager.py +443 -0
  10. package/extensions/services/evol/config.yaml +149 -0
  11. package/extensions/services/evol/config_loader.py +117 -0
  12. package/extensions/services/evol/entry.py +406 -0
  13. package/extensions/services/evol/evol_api.py +173 -0
  14. package/extensions/services/evol/evol_config.json5 +29 -0
  15. package/extensions/services/evol/migrate_tokens.py +122 -0
  16. package/extensions/services/evol/module.md +32 -0
  17. package/extensions/services/evol/pairing.py +250 -0
  18. package/extensions/services/evol/pairing_codes.jsonl +1 -0
  19. package/extensions/services/evol/relay.py +682 -0
  20. package/extensions/services/evol/relay_config.json5 +67 -0
  21. package/extensions/services/evol/routes/__init__.py +1 -0
  22. package/extensions/services/evol/routes/routes_management_ws.py +127 -0
  23. package/extensions/services/evol/routes/routes_rpc.py +89 -0
  24. package/extensions/services/evol/routes/routes_test.py +61 -0
  25. package/extensions/services/evol/server.py +875 -0
  26. package/extensions/services/evol/static/css/style.css +1200 -0
  27. package/extensions/services/evol/static/index.html +781 -0
  28. package/extensions/services/evol/static/index_evol.html +14 -0
  29. package/extensions/services/evol/static/js/app.js +6304 -0
  30. package/extensions/services/evol/static/js/auth.js +326 -0
  31. package/extensions/services/evol/static/js/dialog.js +285 -0
  32. package/extensions/services/evol/static/js/evol-app-fixed.js +50 -0
  33. package/extensions/services/evol/static/js/evol-app.js +1949 -0
  34. package/extensions/services/evol/static/js/evol-app.js.bak +1800 -0
  35. package/extensions/services/evol/static/js/kernel-client-example.js +228 -0
  36. package/extensions/services/evol/static/js/kernel-client.js +396 -0
  37. package/extensions/services/evol/static/js/main.js +141 -0
  38. package/extensions/services/evol/static/js/registry-tests.js +585 -0
  39. package/extensions/services/evol/static/js/stats.js +217 -0
  40. package/extensions/services/evol/static/js/token-manager.js +175 -0
  41. package/extensions/services/evol/static/pairing.html +248 -0
  42. package/extensions/services/evol/static/test_registry.html +262 -0
  43. package/extensions/services/evol/static/test_relay.html +462 -0
  44. package/extensions/services/evol/stats_manager.py +240 -0
  45. package/extensions/services/model_service/entry.py +23 -1
  46. package/extensions/services/proxy/.claude/settings.local.json +13 -0
  47. package/extensions/services/proxy/CHANGELOG_20260308.md +258 -0
  48. package/extensions/services/proxy/_fix_prints.py +133 -0
  49. package/extensions/services/proxy/_fix_prints2.py +87 -0
  50. package/extensions/services/proxy/agentcp/LICENCE +178 -0
  51. package/extensions/services/proxy/agentcp/README copy.md +85 -0
  52. package/extensions/services/proxy/agentcp/README.md +260 -0
  53. package/extensions/services/proxy/agentcp/__init__.py +16 -0
  54. package/extensions/services/proxy/agentcp/agent.py +4 -0
  55. package/extensions/services/proxy/agentcp/agentcp.py +2494 -0
  56. package/extensions/services/proxy/agentcp/agentprofile.json +89 -0
  57. package/extensions/services/proxy/agentcp/ap/__init__.py +16 -0
  58. package/extensions/services/proxy/agentcp/ap/ap_client.py +316 -0
  59. package/extensions/services/proxy/agentcp/assets/images/wechat_qr.png +0 -0
  60. package/extensions/services/proxy/agentcp/backup/metrics.json +31 -0
  61. package/extensions/services/proxy/agentcp/base/__init__.py +20 -0
  62. package/extensions/services/proxy/agentcp/base/auth_client.py +257 -0
  63. package/extensions/services/proxy/agentcp/base/client.py +112 -0
  64. package/extensions/services/proxy/agentcp/base/env.py +34 -0
  65. package/extensions/services/proxy/agentcp/base/html_util.py +336 -0
  66. package/extensions/services/proxy/agentcp/base/log.py +98 -0
  67. package/extensions/services/proxy/agentcp/ca/__init__.py +17 -0
  68. package/extensions/services/proxy/agentcp/ca/ca_client.py +414 -0
  69. package/extensions/services/proxy/agentcp/ca/ca_root.py +74 -0
  70. package/extensions/services/proxy/agentcp/context/__init__.py +20 -0
  71. package/extensions/services/proxy/agentcp/context/context.py +73 -0
  72. package/extensions/services/proxy/agentcp/context/exceptions.py +114 -0
  73. package/extensions/services/proxy/agentcp/create_profile.py +125 -0
  74. package/extensions/services/proxy/agentcp/create_profile_weather.py +125 -0
  75. package/extensions/services/proxy/agentcp/db/__init__.py +15 -0
  76. package/extensions/services/proxy/agentcp/db/db_mananger.py +550 -0
  77. package/extensions/services/proxy/agentcp/docs/UDP_HEARTBEAT_FIX_REPORT.md +265 -0
  78. package/extensions/services/proxy/agentcp/docs/heartbeat_issue_analysis.md +291 -0
  79. package/extensions/services/proxy/agentcp/file/__init__.py +16 -0
  80. package/extensions/services/proxy/agentcp/file/file_client.py +141 -0
  81. package/extensions/services/proxy/agentcp/file/wss_binary_message.py +137 -0
  82. package/extensions/services/proxy/agentcp/hcp.py +299 -0
  83. package/extensions/services/proxy/agentcp/heartbeat/__init__.py +16 -0
  84. package/extensions/services/proxy/agentcp/heartbeat/heartbeat_client.py +360 -0
  85. package/extensions/services/proxy/agentcp/improved_scheduler.py +498 -0
  86. package/extensions/services/proxy/agentcp/llm_agent_utils.py +249 -0
  87. package/extensions/services/proxy/agentcp/llm_server.py +172 -0
  88. package/extensions/services/proxy/agentcp/mermaid.py +210 -0
  89. package/extensions/services/proxy/agentcp/message.py +149 -0
  90. package/extensions/services/proxy/agentcp/metrics.py +256 -0
  91. package/extensions/services/proxy/agentcp/monitoring/__init__.py +20 -0
  92. package/extensions/services/proxy/agentcp/monitoring/global_monitor.py +27 -0
  93. package/extensions/services/proxy/agentcp/monitoring/metrics_store.py +325 -0
  94. package/extensions/services/proxy/agentcp/monitoring/monitoring_service.py +269 -0
  95. package/extensions/services/proxy/agentcp/monitoring/sliding_window.py +222 -0
  96. package/extensions/services/proxy/agentcp/monitoring/standalone_reader.py +224 -0
  97. package/extensions/services/proxy/agentcp/msg/__init__.py +21 -0
  98. package/extensions/services/proxy/agentcp/msg/connection_manager.py +456 -0
  99. package/extensions/services/proxy/agentcp/msg/message_client.py +2058 -0
  100. package/extensions/services/proxy/agentcp/msg/message_serialize.py +263 -0
  101. package/extensions/services/proxy/agentcp/msg/open_ai_message.py +88 -0
  102. package/extensions/services/proxy/agentcp/msg/session_manager.py +1062 -0
  103. package/extensions/services/proxy/agentcp/msg/stream_client.py +267 -0
  104. package/extensions/services/proxy/agentcp/msg/websocket_file_receiver.py +89 -0
  105. package/extensions/services/proxy/agentcp/msg/ws_logger.py +685 -0
  106. package/extensions/services/proxy/agentcp/msg/wss_binary_message.py +137 -0
  107. package/extensions/services/proxy/agentcp/requirements.txt +7 -0
  108. package/extensions/services/proxy/agentcp/samples/agent_graph/README.md +37 -0
  109. package/extensions/services/proxy/agentcp/samples/agent_graph/agentprofile.json +89 -0
  110. package/extensions/services/proxy/agentcp/samples/agent_graph/create_profile.py +138 -0
  111. package/extensions/services/proxy/agentcp/samples/agent_graph/main.py +164 -0
  112. package/extensions/services/proxy/agentcp/samples/agent_use/create_profile.py +123 -0
  113. package/extensions/services/proxy/agentcp/samples/agent_use/llm/create_profile.py +129 -0
  114. package/extensions/services/proxy/agentcp/samples/agent_use/llm/env.json +5 -0
  115. package/extensions/services/proxy/agentcp/samples/agent_use/llm/main.py +146 -0
  116. package/extensions/services/proxy/agentcp/samples/agent_use/main.py +123 -0
  117. package/extensions/services/proxy/agentcp/samples/agent_use/readme.md +379 -0
  118. package/extensions/services/proxy/agentcp/samples/agent_use/search/create_profile.py +129 -0
  119. package/extensions/services/proxy/agentcp/samples/agent_use/search/main.py +28 -0
  120. package/extensions/services/proxy/agentcp/samples/agent_use/tool/create_profile.py +129 -0
  121. package/extensions/services/proxy/agentcp/samples/agent_use/tool/main.py +20 -0
  122. package/extensions/services/proxy/agentcp/samples/ali_amap/README.md +97 -0
  123. package/extensions/services/proxy/agentcp/samples/ali_amap/amap_agent.py +88 -0
  124. package/extensions/services/proxy/agentcp/samples/ali_amap/create_profile.py +125 -0
  125. package/extensions/services/proxy/agentcp/samples/compute_agent/agent/powershell.py +228 -0
  126. package/extensions/services/proxy/agentcp/samples/compute_agent/agent/software.py +63 -0
  127. package/extensions/services/proxy/agentcp/samples/compute_agent/agent/tools.py +36 -0
  128. package/extensions/services/proxy/agentcp/samples/compute_agent/browser_user.py +41 -0
  129. package/extensions/services/proxy/agentcp/samples/deepseek/README.md +79 -0
  130. package/extensions/services/proxy/agentcp/samples/deepseek/create_profile.py +126 -0
  131. package/extensions/services/proxy/agentcp/samples/deepseek/deepseek.py +42 -0
  132. package/extensions/services/proxy/agentcp/samples/dify_chat/README.md +78 -0
  133. package/extensions/services/proxy/agentcp/samples/dify_chat/create_profile.py +126 -0
  134. package/extensions/services/proxy/agentcp/samples/dify_chat/dify_chat.py +47 -0
  135. package/extensions/services/proxy/agentcp/samples/dify_workflow/README.md +78 -0
  136. package/extensions/services/proxy/agentcp/samples/dify_workflow/create_profile.py +126 -0
  137. package/extensions/services/proxy/agentcp/samples/dify_workflow/dify_workflow.py +46 -0
  138. package/extensions/services/proxy/agentcp/samples/executor/README.md +44 -0
  139. package/extensions/services/proxy/agentcp/samples/executor/agentprofile.json +89 -0
  140. package/extensions/services/proxy/agentcp/samples/executor/create_profile.py +139 -0
  141. package/extensions/services/proxy/agentcp/samples/executor/main.py +160 -0
  142. package/extensions/services/proxy/agentcp/samples/filereader/README.md +45 -0
  143. package/extensions/services/proxy/agentcp/samples/filereader/agentprofile.json +90 -0
  144. package/extensions/services/proxy/agentcp/samples/filereader/create_profile.py +137 -0
  145. package/extensions/services/proxy/agentcp/samples/filereader/main.py +253 -0
  146. package/extensions/services/proxy/agentcp/samples/filewriter/README.md +38 -0
  147. package/extensions/services/proxy/agentcp/samples/filewriter/agentprofile.json +91 -0
  148. package/extensions/services/proxy/agentcp/samples/filewriter/create_profile.py +138 -0
  149. package/extensions/services/proxy/agentcp/samples/filewriter/main.py +289 -0
  150. package/extensions/services/proxy/agentcp/samples/hcp/README.md +85 -0
  151. package/extensions/services/proxy/agentcp/samples/hcp/acp_weather_agent.zip +0 -0
  152. package/extensions/services/proxy/agentcp/samples/hcp/create_profile.py +125 -0
  153. package/extensions/services/proxy/agentcp/samples/hcp/hcp.py +237 -0
  154. package/extensions/services/proxy/agentcp/samples/helloworld/README.md +68 -0
  155. package/extensions/services/proxy/agentcp/samples/helloworld/hello_world.py +40 -0
  156. package/extensions/services/proxy/agentcp/samples/llm_agent/MEADME.md +117 -0
  157. package/extensions/services/proxy/agentcp/samples/llm_agent/create_profile.py +125 -0
  158. package/extensions/services/proxy/agentcp/samples/llm_agent/qwen_agent.py +136 -0
  159. package/extensions/services/proxy/agentcp/samples/local_llm_agent/README.md +90 -0
  160. package/extensions/services/proxy/agentcp/samples/local_llm_agent/create_profile.py +125 -0
  161. package/extensions/services/proxy/agentcp/samples/local_llm_agent/main.py +49 -0
  162. package/extensions/services/proxy/agentcp/samples/query_llm_from_agent/README.md +55 -0
  163. package/extensions/services/proxy/agentcp/samples/query_llm_from_agent/create_profile.py +125 -0
  164. package/extensions/services/proxy/agentcp/samples/query_llm_from_agent/main.py +23 -0
  165. package/extensions/services/proxy/agentcp/samples/query_weather_api_agent/README.md +103 -0
  166. package/extensions/services/proxy/agentcp/samples/query_weather_api_agent/create_profile.py +125 -0
  167. package/extensions/services/proxy/agentcp/samples/query_weather_api_agent/main.py +69 -0
  168. package/extensions/services/proxy/agentcp/samples/query_weather_from_agent/README.md +58 -0
  169. package/extensions/services/proxy/agentcp/samples/query_weather_from_agent/create_profile.py +125 -0
  170. package/extensions/services/proxy/agentcp/samples/query_weather_from_agent/main.py +25 -0
  171. package/extensions/services/proxy/agentcp/samples/qwen3/README.md +71 -0
  172. package/extensions/services/proxy/agentcp/samples/qwen3/create_profile.py +126 -0
  173. package/extensions/services/proxy/agentcp/samples/qwen3/qwen3.py +37 -0
  174. package/extensions/services/proxy/agentcp/samples/qwen3_tools/README.md +133 -0
  175. package/extensions/services/proxy/agentcp/samples/qwen3_tools/create_profile.py +126 -0
  176. package/extensions/services/proxy/agentcp/samples/qwen3_tools/qwen3_tools.py +98 -0
  177. package/extensions/services/proxy/agentcp/samples/search/create_profile_qwen.py +125 -0
  178. package/extensions/services/proxy/agentcp/samples/search/create_profile_search.py +125 -0
  179. package/extensions/services/proxy/agentcp/samples/search/qwen_agent.py +136 -0
  180. package/extensions/services/proxy/agentcp/samples/search/search_agent.py +170 -0
  181. package/extensions/services/proxy/agentcp/samples/wrapper_agently_to_agent/README.md +89 -0
  182. package/extensions/services/proxy/agentcp/samples/wrapper_agently_to_agent/create_profile.py +125 -0
  183. package/extensions/services/proxy/agentcp/samples/wrapper_agently_to_agent/main.py +44 -0
  184. package/extensions/services/proxy/agentcp/utils/__init__.py +15 -0
  185. package/extensions/services/proxy/agentcp/utils/file_util.py +117 -0
  186. package/extensions/services/proxy/agentcp/utils/proxy_bypass.py +99 -0
  187. package/extensions/services/proxy/agentcp/workflow.py +203 -0
  188. package/extensions/services/proxy/console_auth.py +109 -0
  189. package/extensions/services/proxy/evol/__init__.py +1 -0
  190. package/extensions/services/proxy/evol/config.py +37 -0
  191. package/extensions/services/proxy/evol/http/__init__.py +1 -0
  192. package/extensions/services/proxy/evol/http/async_http.py +551 -0
  193. package/extensions/services/proxy/evol/log.py +28 -0
  194. package/extensions/services/proxy/evol/presenter/__init__.py +2 -0
  195. package/extensions/services/proxy/evol/presenter/agentIdPresenter.py +1031 -0
  196. package/extensions/services/proxy/evol/presenter/apikeyPresenter.py +106 -0
  197. package/extensions/services/proxy/evol/presenter/configPresenter.py +1281 -0
  198. package/extensions/services/proxy/evol/presenter/userPresenter.py +477 -0
  199. package/extensions/services/proxy/evol/server/__init__.py +1 -0
  200. package/extensions/services/proxy/evol/server/claude_proxy_async.py +3430 -0
  201. package/extensions/services/proxy/evol/server/openclaw_proxy.py +1861 -0
  202. package/extensions/services/proxy/evol/server/proxy_config.py +15 -0
  203. package/extensions/services/proxy/evol/server/proxy_engine.py +501 -0
  204. package/extensions/services/proxy/evol/version.py +24 -0
  205. package/extensions/services/proxy/logs/websocket.log +260 -0
  206. package/extensions/services/proxy/main.py +240 -0
  207. package/extensions/services/proxy/requirements.txt +13 -0
  208. package/extensions/services/proxy/server.py +271 -0
  209. package/extensions/services/watchdog/entry.py +42 -16
  210. package/extensions/services/watchdog/module.md +1 -0
  211. package/extensions/services/watchdog/monitor.py +34 -4
  212. package/extensions/services/web/module.md +1 -1
  213. package/extensions/services/web/server.py +30 -18
  214. package/extensions/services/web/static/js/token-manager.js +10 -10
  215. package/kernel/entry.py +1 -1
  216. package/kernel/module.md +25 -1
  217. package/kernel/registry_store.py +2 -26
  218. package/kernel/rpc_router.py +36 -10
  219. package/kernel/server.py +106 -17
  220. package/kite_cli/commands/deps_install.py +67 -0
  221. package/kite_cli/commands/env_check.py +45 -0
  222. package/kite_cli/commands/prepare.py +49 -0
  223. package/kite_cli/commands/venv_setup.py +56 -0
  224. package/kite_cli/main.py +29 -1
  225. package/launcher/entry.py +306 -21
  226. package/launcher/module.md +9 -0
  227. package/launcher/module_scanner.py +11 -1
  228. package/main.py +4 -1
  229. package/package.json +8 -1
  230. package/python_version.json +4 -0
  231. package/requirements.txt +38 -0
  232. package/scripts/env-manager.js +328 -0
  233. package/scripts/python-env.js +79 -0
  234. package/scripts/scan_dependencies.py +461 -0
  235. package/scripts/setup-python-env.js +191 -0
@@ -0,0 +1,1861 @@
1
+ """
2
+ OpenClaw Proxy - OpenAI-compatible API endpoint that forwards to Claude via AgentCP
3
+ """
4
+ import json
5
+ import time
6
+ import asyncio
7
+ import logging
8
+ import httpx
9
+ from typing import Any, AsyncGenerator, Dict, List, Tuple
10
+ from urllib.parse import unquote
11
+ from starlette.responses import JSONResponse, StreamingResponse, Response
12
+ from starlette.requests import Request
13
+
14
+ from ..presenter import configPresenter
15
+ from . import claude_proxy_async
16
+ from agentcp.base.log import log_info, log_error
17
+ from agentcp.agentcp import AgentID
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
# ============================================================================
# OpenClaw's own Session Manager and AgentID
# Fully independent of claude_proxy; no resources are shared.
# ============================================================================

# AgentID instance dedicated to OpenClaw (assigned via set_openclaw_agentid).
openclaw_agentId: AgentID = None

# AsyncSessionManager instance dedicated to OpenClaw.
# NOTE(review): shared-session mode keeps this at None (see
# set_openclaw_agentid) -- confirm all callers tolerate a disabled manager.
openclaw_session_manager = None
31
+
32
+
33
def set_openclaw_agentid(aid: AgentID):
    """Compatibility hook: OpenClaw now uses the shared Claude session manager.

    Records *aid* in the module-level global and explicitly disables the
    isolated OpenClawSessionManager (shared-session mode).
    """
    global openclaw_agentId, openclaw_session_manager
    # Remember the agent identity; the dedicated manager stays off while
    # sessions are shared with claude_proxy.
    openclaw_agentId = aid
    openclaw_session_manager = None
    log_info(f"[OpenClaw] Shared-session mode initialized, AgentID: {aid.id if aid else 'None'}")
40
+
41
+
42
class OpenClawSessionManager:
    """OpenClaw-dedicated session manager -- fully independent of claude_proxy.

    Maps API keys to AgentCP sessions, registers one reply handler per
    session, and matches in-flight requests to upstream replies by
    ``trace_id`` using per-request ``asyncio.Event`` objects.
    NOTE(review): module-level code keeps this manager disabled in
    shared-session mode (see ``set_openclaw_agentid``) -- confirm before use.
    """

    def __init__(self):
        import uuid
        # Short random id used only to tell manager instances apart in logs.
        self._instance_id = str(uuid.uuid4())[:8]
        logger.debug(f"[OpenClawSessionManager] 初始化实例 ID: {self._instance_id}")

        self._sessions = {}  # api_key -> session_id
        self._session_info = {}  # session_id -> {'created_time', 'last_used', 'api_key'}
        self._lock = asyncio.Lock()
        self._session_timeout = 1800  # 30 minutes of idle time before a session expires
        self._last_cleanup = time.time()

        # trace_id matching machinery
        self._pending_requests = {}  # trace_id -> asyncio.Event
        self._request_result_map = {}  # trace_id -> {"result_type": str, "result": dict}
        self._handler_registered = {}  # session_id -> bool
        self._request_timestamps = {}  # trace_id -> timestamp

    async def get_session(self, api_key: str, agent_id: AgentID) -> str:
        """Return a live session id for *api_key*, creating one if needed.

        Reuses an existing session while it is younger than the idle
        timeout; expired sessions are torn down and replaced. Returns
        ``None`` when upstream session creation fails.
        """
        async with self._lock:
            current_time = time.time()

            # Check for an existing, unexpired session bound to this key.
            if api_key in self._sessions:
                session_id = self._sessions[api_key]
                if session_id in self._session_info:
                    session_data = self._session_info[session_id]
                    if current_time - session_data['last_used'] < self._session_timeout:
                        session_data['last_used'] = current_time
                        # Re-register the reply handler if it was lost.
                        if session_id not in self._handler_registered:
                            await self._setup_handler(session_id, agent_id)
                        log_info(f"[OpenClaw] 复用 session: {session_id}")
                        return session_id
                    else:
                        # Idle timeout exceeded: discard and fall through to create.
                        await self._remove_session(api_key, session_id, agent_id)

            # Create a new session (blocking SDK call, run off the event loop).
            session_id = await asyncio.to_thread(
                agent_id.create_session, "openclaw_proxy", "openclaw_proxy"
            )

            if session_id:
                # Invite the upstream openclaw.aid.pub agent into the session.
                await asyncio.to_thread(
                    agent_id.invite_member, session_id, "openclaw.aid.pub"
                )

                self._sessions[api_key] = session_id
                self._session_info[session_id] = {
                    'created_time': current_time,
                    'last_used': current_time,
                    'api_key': api_key
                }

                await self._setup_handler(session_id, agent_id)
                log_info(f"[OpenClaw] 创建新 session: {session_id}")
                return session_id
            else:
                log_error(f"[OpenClaw] 创建 session 失败")
                return None

    async def _setup_handler(self, session_id: str, agent_id: AgentID):
        """Register the reply-message handler for *session_id*.

        Replies arriving on SDK threads are matched to waiting requests
        by trace_id and signalled through the captured main event loop.
        """
        # Capture the main event loop so handler code running on other
        # threads can signal waiters safely.
        main_loop = asyncio.get_running_loop()

        def _threadsafe_set_event(event):
            """Set an asyncio.Event from any thread via the main loop."""
            try:
                main_loop.call_soon_threadsafe(event.set)
            except RuntimeError:
                # Loop already closed; fall back to a direct set.
                event.set()

        async def reply_handler(reply_msg):
            # Upstream reply callback: extract the first content dict and
            # route it to the request waiting on its trace_id.
            try:
                if reply_msg is None:
                    return

                messages = agent_id.get_content_array_from_message(reply_msg)
                if not messages:
                    return

                response_msg = messages[0]
                if not isinstance(response_msg, dict):
                    return

                trace_id = response_msg.get("trace_id", "")
                msg_type = response_msg.get("type", "")

                # Error messages without a trace_id cannot be routed to a
                # single request -- broadcast them to every waiter.
                if not trace_id and msg_type == "error":
                    log_error(f"[OpenClaw] 收到无 trace_id 的错误消息")
                    error_result = {"result_type": "error", "result": response_msg}
                    for pending_trace_id in list(self._pending_requests.keys()):
                        self._request_result_map[pending_trace_id] = error_result
                        event = self._pending_requests.get(pending_trace_id)
                        if event:
                            _threadsafe_set_event(event)
                    return

                if not trace_id:
                    return

                # Store the result and wake the waiting request.
                result = {"result_type": msg_type, "result": response_msg}
                self._request_result_map[trace_id] = result

                event = self._pending_requests.get(trace_id)
                if event:
                    _threadsafe_set_event(event)
            except Exception as e:
                log_error(f"[OpenClaw Handler] 异常: {e}")

        try:
            await asyncio.to_thread(
                agent_id.add_message_handler, reply_handler, session_id=session_id
            )
            self._handler_registered[session_id] = True
        except Exception as e:
            log_error(f"[OpenClaw] Handler 注册失败: {e}")

    async def _remove_session(self, api_key: str, session_id: str, agent_id: AgentID):
        """Close *session_id* upstream and drop all local bookkeeping for it."""
        try:
            if agent_id:
                await asyncio.to_thread(agent_id.close_session, session_id)
            if api_key in self._sessions:
                del self._sessions[api_key]
            if session_id in self._session_info:
                del self._session_info[session_id]
            if session_id in self._handler_registered:
                del self._handler_registered[session_id]
        except Exception as e:
            log_error(f"[OpenClaw] 移除 session 失败: {e}")

    def register_request_timestamp(self, trace_id: str):
        """Record the wall-clock time at which *trace_id* was issued."""
        self._request_timestamps[trace_id] = time.time()
183
+
184
+
185
# (owner, model id) pairs exposed through /v1/models, in display order.
_MODEL_CATALOG = (
    ("anthropic", "claude-opus-4-6"),
    ("anthropic", "claude-sonnet-4-6"),
    ("anthropic", "claude-haiku-4-5-20251001"),
    ("anthropic", "claude-opus-4-5-20251101"),
    ("anthropic", "claude-sonnet-4-5-20250929"),
    ("openai", "gpt-5.4"),
    ("openai", "gpt-5.3-codex"),
    ("openai", "gpt-5.2"),
)

# Supported models list, expanded into the OpenAI "model" object shape.
SUPPORTED_MODELS = [
    {
        "id": model_id,
        "object": "model",
        "created": 1704067200,
        "owned_by": owner,
    }
    for owner, model_id in _MODEL_CATALOG
]
236
+
237
# Model name mapping: OpenAI-style names to Claude API names.
# These simplified names are deliberately identity-mapped since
# openclaw.aid.pub performs the real mapping on its side.
MODEL_NAME_MAPPING = {
    name: name
    for name in (
        "claude-sonnet-4-6",
        "claude-opus-4-6",
        "claude-haiku-4-5-20251001",
        "claude-opus-4-5-20251101",
    )
}


def normalize_model_name(model: str) -> str:
    """Normalize a model name for the Claude API.

    The openclaw.aid.pub service handles the actual model name mapping,
    so names absent from MODEL_NAME_MAPPING pass through unchanged.
    """
    return MODEL_NAME_MAPPING.get(model, model)
253
+
254
+
255
def is_model_supported(model: str) -> bool:
    """Return True when *model* matches an id in SUPPORTED_MODELS."""
    supported_ids = {entry["id"] for entry in SUPPORTED_MODELS}
    return model in supported_ids
258
+
259
+
260
def get_models_list() -> dict:
    """Return the supported-model catalog in OpenAI list format."""
    listing = {"object": "list"}
    listing["data"] = SUPPORTED_MODELS
    return listing
266
+
267
+
268
def is_claude_model(model: str) -> bool:
    """Heuristically detect Claude model names (case-insensitive substring match)."""
    lowered = model.lower()
    for marker in ('claude-', 'claude_', 'anthropic/', 'anthropic.'):
        if marker in lowered:
            return True
    return False
272
+
273
+
274
def is_gpt_model(model: str) -> bool:
    """Heuristically detect GPT/Codex model names (case-insensitive substring match)."""
    lowered = model.lower()
    markers = ('gpt-', 'gpt_', 'codex', 'openai/')
    return next((True for marker in markers if marker in lowered), False)
278
+
279
+
280
def openai_error_response(message: str, error_type: str = "invalid_request_error", code: str = None, status_code: int = 400) -> JSONResponse:
    """Build an OpenAI-formatted JSON error response.

    The body follows the {"error": {"message", "type", "code"}} shape
    that OpenAI clients expect; *status_code* sets the HTTP status.
    """
    payload = {
        "error": {"message": message, "type": error_type, "code": code},
    }
    return JSONResponse(payload, status_code=status_code)
290
+
291
+
292
def normalize_openclaw_error_message(response_msg: Any) -> tuple[str, int]:
    """Normalize upstream OpenClaw errors to a user-facing (message, status) pair.

    Dict messages contribute content/http_status/trace_id; anything else
    is stringified. Errors without a trace_id, or mentioning "offline",
    collapse to a generic service-unavailable message with status 503.
    """
    fallback = "服务暂不可用,请联系客服处理"

    if isinstance(response_msg, dict):
        raw_message = response_msg.get("content", "Unknown error")
        http_status = response_msg.get("http_status", 503)
        trace_id = response_msg.get("trace_id")
    else:
        raw_message, http_status, trace_id = str(response_msg), 503, None

    error_text = str(raw_message or "").strip()

    # Untraceable errors cannot be correlated upstream, and "offline"
    # means the backing agent is unreachable -- both get the fallback.
    if not trace_id or "offline" in error_text.lower():
        return fallback, 503

    return error_text or fallback, int(http_status or 503)
313
+
314
+
315
+ def _to_input_parts(content: Any) -> List[dict]:
316
+ """Convert system/user/developer chat content to Responses API input parts."""
317
+ if isinstance(content, str):
318
+ return [{"type": "input_text", "text": content}]
319
+
320
+ if not isinstance(content, list):
321
+ return [{"type": "input_text", "text": str(content) if content is not None else ""}]
322
+
323
+ parts: List[dict] = []
324
+ for block in content:
325
+ if not isinstance(block, dict):
326
+ parts.append({"type": "input_text", "text": str(block)})
327
+ continue
328
+
329
+ block_type = block.get("type")
330
+ if block_type == "text":
331
+ parts.append({"type": "input_text", "text": block.get("text", "")})
332
+ elif block_type == "image_url":
333
+ image_url = block.get("image_url", {})
334
+ if isinstance(image_url, dict):
335
+ url = image_url.get("url", "")
336
+ else:
337
+ url = str(image_url)
338
+ parts.append({"type": "input_image", "image_url": url})
339
+ else:
340
+ # Keep unknown blocks as plain text to avoid dropping user context.
341
+ parts.append({"type": "input_text", "text": json.dumps(block, ensure_ascii=False)})
342
+
343
+ return parts
344
+
345
+
346
+ def _to_assistant_output_parts(content: Any) -> List[dict]:
347
+ """Convert assistant chat content to Responses API output parts."""
348
+ if isinstance(content, str):
349
+ return [{"type": "output_text", "text": content}]
350
+
351
+ if content is None:
352
+ return []
353
+
354
+ if not isinstance(content, list):
355
+ return [{"type": "output_text", "text": str(content)}]
356
+
357
+ parts: List[dict] = []
358
+ for block in content:
359
+ if not isinstance(block, dict):
360
+ parts.append({"type": "output_text", "text": str(block)})
361
+ continue
362
+
363
+ block_type = block.get("type")
364
+ if block_type in ("text", "output_text"):
365
+ parts.append({"type": "output_text", "text": block.get("text", "")})
366
+ elif block_type == "refusal":
367
+ refusal_text = block.get("refusal")
368
+ if refusal_text is None:
369
+ refusal_text = block.get("text", "")
370
+ parts.append({"type": "refusal", "refusal": str(refusal_text)})
371
+ else:
372
+ # Assistant input blocks only support output_text/refusal.
373
+ parts.append({"type": "output_text", "text": json.dumps(block, ensure_ascii=False)})
374
+
375
+ return parts
376
+
377
+
378
def _convert_messages_to_responses_input(messages: List[dict]) -> List[dict]:
    """Convert OpenAI chat messages[] to Responses API input[] items.

    system/user/developer messages become "message" items with input
    parts; assistant turns become "message" items with output parts plus
    one "function_call" item per tool call; tool results become
    "function_call_output" items. Messages with any other role are
    silently dropped.
    """
    input_items: List[dict] = []

    for msg in messages:
        role = msg.get("role")
        content = msg.get("content")

        if role in ("system", "user", "developer"):
            message_item = {
                "type": "message",
                "role": role,
                "content": _to_input_parts(content)
            }
            input_items.append(message_item)

        elif role == "assistant":
            assistant_parts = _to_assistant_output_parts(content)

            # Chat Completions may include top-level refusal on assistant messages.
            refusal_text = msg.get("refusal")
            if refusal_text:
                assistant_parts.append({"type": "refusal", "refusal": str(refusal_text)})

            # Skip empty assistant messages (e.g. tool_calls-only turns).
            if assistant_parts:
                input_items.append({
                    "type": "message",
                    "role": "assistant",
                    "content": assistant_parts
                })

            # Carry forward assistant tool calls as explicit function_call items.
            # Fix: clients commonly send "tool_calls": null; the previous
            # msg.get("tool_calls", []) would iterate None and raise TypeError.
            for tool_call in msg.get("tool_calls") or []:
                if tool_call.get("type") != "function":
                    continue
                func = tool_call.get("function", {})
                input_items.append({
                    "type": "function_call",
                    "call_id": tool_call.get("id", ""),
                    "name": func.get("name", ""),
                    "arguments": func.get("arguments", "{}")
                })

        elif role == "tool":
            tool_call_id = msg.get("tool_call_id", "")
            # Tool results must be a string; serialize structured content.
            tool_output = content if isinstance(content, str) else json.dumps(content, ensure_ascii=False)
            input_items.append({
                "type": "function_call_output",
                "call_id": tool_call_id,
                "output": tool_output
            })

    return input_items
432
+
433
+
434
+ def _convert_tools_to_responses_format(tools: List[dict]) -> List[dict]:
435
+ """Convert OpenAI chat tool schema to Responses API function tool schema."""
436
+ response_tools: List[dict] = []
437
+ for tool in tools:
438
+ if tool.get("type") != "function":
439
+ continue
440
+ fn = tool.get("function", {})
441
+ response_tools.append({
442
+ "type": "function",
443
+ "name": fn.get("name", ""),
444
+ "description": fn.get("description", ""),
445
+ "parameters": fn.get("parameters", {})
446
+ })
447
+ return response_tools
448
+
449
+
450
def convert_openai_to_response_format(body: dict) -> dict:
    """Convert an OpenAI /v1/chat/completions payload to a Responses API payload.

    Messages and tools are structurally translated; scalar sampling options are
    copied through unchanged when present.
    """
    converted: Dict[str, Any] = {
        "model": body.get("model", "gpt-5.3-codex"),
        "input": _convert_messages_to_responses_input(body.get("messages", [])),
        "stream": body.get("stream", False),
    }

    # max_tokens wins over the newer max_completion_tokens alias.
    if "max_tokens" in body:
        converted["max_output_tokens"] = body["max_tokens"]
    elif "max_completion_tokens" in body:
        converted["max_output_tokens"] = body["max_completion_tokens"]

    # Pass-through parameters that keep the same name and shape.
    for key in ("temperature", "top_p", "stop", "frequency_penalty",
                "presence_penalty", "n", "user"):
        if key in body:
            converted[key] = body[key]

    if "tools" in body:
        tools = _convert_tools_to_responses_format(body["tools"])
        if tools:
            converted["tools"] = tools

    if "tool_choice" in body:
        choice = body["tool_choice"]
        if isinstance(choice, dict) and choice.get("type") == "function":
            # Responses API expects the flattened {"type": "function", "name": ...}.
            name = choice.get("function", {}).get("name")
            if name:
                converted["tool_choice"] = {"type": "function", "name": name}
        else:
            converted["tool_choice"] = choice

    return converted
500
+
501
+
502
+ def _usage_to_openai_usage(response_body: dict) -> dict:
503
+ usage = response_body.get("usage", {}) or {}
504
+ prompt_tokens = usage.get("prompt_tokens", usage.get("input_tokens", 0))
505
+ completion_tokens = usage.get("completion_tokens", usage.get("output_tokens", 0))
506
+ total_tokens = usage.get("total_tokens", prompt_tokens + completion_tokens)
507
+ return {
508
+ "prompt_tokens": prompt_tokens,
509
+ "completion_tokens": completion_tokens,
510
+ "total_tokens": total_tokens
511
+ }
512
+
513
+
514
+ def _extract_response_text_and_tools(response_body: dict) -> Tuple[str, List[dict]]:
515
+ text_parts: List[str] = []
516
+ tool_calls: List[dict] = []
517
+ output = response_body.get("output", [])
518
+
519
+ if isinstance(output, list):
520
+ for item in output:
521
+ if not isinstance(item, dict):
522
+ continue
523
+ item_type = item.get("type")
524
+
525
+ if item_type == "message":
526
+ for part in item.get("content", []):
527
+ if not isinstance(part, dict):
528
+ continue
529
+ part_type = part.get("type")
530
+ if part_type in ("output_text", "text", "input_text"):
531
+ text_parts.append(part.get("text", ""))
532
+ elif item_type in ("function_call", "tool_call"):
533
+ raw_arguments = item.get("arguments", item.get("arguments_json", item.get("input", "")))
534
+ if isinstance(raw_arguments, (dict, list)):
535
+ arguments_str = json.dumps(raw_arguments, ensure_ascii=False)
536
+ elif raw_arguments is None:
537
+ arguments_str = ""
538
+ else:
539
+ arguments_str = str(raw_arguments)
540
+ tool_calls.append({
541
+ "id": item.get("call_id", item.get("id", "")),
542
+ "type": "function",
543
+ "function": {
544
+ "name": item.get("name", ""),
545
+ "arguments": arguments_str
546
+ }
547
+ })
548
+
549
+ if not text_parts:
550
+ fallback_content = response_body.get("content", "")
551
+ if isinstance(fallback_content, list):
552
+ for block in fallback_content:
553
+ if isinstance(block, dict):
554
+ text_parts.append(block.get("text", ""))
555
+ else:
556
+ text_parts.append(str(block))
557
+ elif isinstance(fallback_content, str):
558
+ text_parts.append(fallback_content)
559
+ elif fallback_content is not None:
560
+ text_parts.append(str(fallback_content))
561
+
562
+ return "".join(text_parts), tool_calls
563
+
564
+
565
def convert_response_to_openai_format(response_body: dict, model: str) -> dict:
    """Convert a Responses API payload back to OpenAI chat-completions format.

    Bodies that already look like chat completions (carry "choices") are
    returned untouched; provider wrappers nesting the payload under
    "response" are unwrapped first.
    """
    # Already OpenAI-shaped? Nothing to do.
    if "choices" in response_body:
        return response_body

    # Some providers wrap the actual response object.
    wrapped = response_body.get("response")
    if isinstance(wrapped, dict) and ("output" in wrapped or "usage" in wrapped):
        response_body = wrapped

    content_text, tool_calls = _extract_response_text_and_tools(response_body)

    message: Dict[str, Any] = {"role": "assistant", "content": content_text or None}
    if tool_calls:
        message["tool_calls"] = tool_calls

    return {
        "id": response_body.get("id", f"chatcmpl-{int(time.time())}"),
        "object": "chat.completion",
        "created": response_body.get("created", int(time.time())),
        "model": model,
        "choices": [{
            "index": 0,
            "message": message,
            "finish_reason": "tool_calls" if tool_calls else "stop",
        }],
        "usage": _usage_to_openai_usage(response_body),
    }
602
+
603
+
604
+ def _to_unix_seconds(value: Any, default: int) -> int:
605
+ if value is None:
606
+ return default
607
+ try:
608
+ number = float(value)
609
+ if number > 10_000_000_000: # likely ms timestamp
610
+ number = number / 1000.0
611
+ return int(number)
612
+ except Exception:
613
+ return default
614
+
615
+
616
def convert_openai_to_claude_request(body: dict) -> dict:
    """Convert an OpenAI chat completion request to Claude messages format.

    Args:
        body: OpenAI /v1/chat/completions request payload.

    Returns:
        A Claude /v1/messages request payload: system prompts are hoisted to
        the top-level "system" field, image/tool content blocks are mapped to
        Claude's block types, and tool_choice/stop parameters are renamed.
    """
    claude_body = {}

    # Collect system prompts separately; Claude takes them outside messages[].
    system_messages = []
    messages = []

    for msg in body.get("messages", []):
        role = msg.get("role")
        content = msg.get("content")

        if role == "system":
            if isinstance(content, str):
                system_messages.append(content)
            elif isinstance(content, list):
                # Array-form system content: concatenate the text blocks.
                text_parts = [block.get("text", "") for block in content if block.get("type") == "text"]
                system_messages.append("".join(text_parts))

        elif role == "user":
            claude_msg = {"role": "user"}
            if isinstance(content, str):
                claude_msg["content"] = content
            elif isinstance(content, list):
                # Multimodal content: text and image blocks.
                claude_content = []
                for block in content:
                    if block.get("type") == "text":
                        claude_content.append({"type": "text", "text": block.get("text", "")})
                    elif block.get("type") == "image_url":
                        # OpenAI image_url -> Claude image source.
                        image_url = block.get("image_url", {})
                        url = image_url.get("url", "")
                        if url.startswith("data:"):
                            # Data URI: "data:<media_type>;base64,<payload>"
                            parts = url.split(",", 1)
                            if len(parts) == 2:
                                media_type = parts[0].split(":")[1].split(";")[0]
                                data = parts[1]
                                claude_content.append({
                                    "type": "image",
                                    "source": {
                                        "type": "base64",
                                        "media_type": media_type,
                                        "data": data
                                    }
                                })
                        else:
                            # Remote URL image.
                            claude_content.append({
                                "type": "image",
                                "source": {
                                    "type": "url",
                                    "url": url
                                }
                            })
                claude_msg["content"] = claude_content
            messages.append(claude_msg)

        elif role == "assistant":
            claude_msg = {"role": "assistant", "content": []}

            # Text content (string or block list).
            if isinstance(content, str) and content:
                claude_msg["content"].append({"type": "text", "text": content})
            elif isinstance(content, list):
                for block in content:
                    if block.get("type") == "text":
                        claude_msg["content"].append({"type": "text", "text": block.get("text", "")})

            # Tool calls become Claude tool_use blocks.  Guard with `or []`
            # because clients may send an explicit "tool_calls": null.
            for tc in msg.get("tool_calls") or []:
                if tc.get("type") == "function":
                    func = tc.get("function", {})
                    try:
                        arguments = json.loads(func.get("arguments", "{}"))
                    except (json.JSONDecodeError, TypeError):
                        # Malformed/non-string arguments degrade to empty input.
                        arguments = {}
                    claude_msg["content"].append({
                        "type": "tool_use",
                        "id": tc.get("id", ""),
                        "name": func.get("name", ""),
                        "input": arguments
                    })

            messages.append(claude_msg)

        elif role == "tool":
            # OpenAI tool messages become user messages carrying a tool_result
            # block referencing the originating tool_use id.
            tool_call_id = msg.get("tool_call_id", "")
            tool_content = content if isinstance(content, str) else json.dumps(content)

            messages.append({
                "role": "user",
                "content": [{
                    "type": "tool_result",
                    "tool_use_id": tool_call_id,
                    "content": tool_content
                }]
            })

    if system_messages:
        claude_body["system"] = "\n\n".join(system_messages)

    claude_body["messages"] = messages

    # Model - already validated in proxy_openclaw_request
    claude_body["model"] = normalize_model_name(body.get("model"))

    # Claude requires max_tokens; default to 4096 when the client omits it.
    claude_body["max_tokens"] = body.get("max_tokens") or body.get("max_completion_tokens", 4096)

    # Sampling parameters pass through unchanged.
    if "temperature" in body:
        claude_body["temperature"] = body["temperature"]
    if "top_p" in body:
        claude_body["top_p"] = body["top_p"]

    # OpenAI "stop" (str or list) -> Claude "stop_sequences" (list).
    if "stop" in body:
        stop = body["stop"]
        if isinstance(stop, str):
            claude_body["stop_sequences"] = [stop]
        elif isinstance(stop, list):
            claude_body["stop_sequences"] = stop

    if "stream" in body:
        claude_body["stream"] = body["stream"]

    # OpenAI tool "parameters" schema maps to Claude "input_schema".
    if "tools" in body:
        claude_tools = []
        for tool in body["tools"]:
            if tool.get("type") == "function":
                func = tool.get("function", {})
                claude_tools.append({
                    "name": func.get("name", ""),
                    "description": func.get("description", ""),
                    "input_schema": func.get("parameters", {})
                })
        if claude_tools:
            claude_body["tools"] = claude_tools

    # Tool choice mapping; Claude has no "none", so it degrades to "auto".
    if "tool_choice" in body:
        tc = body["tool_choice"]
        if tc == "auto":
            claude_body["tool_choice"] = {"type": "auto"}
        elif tc == "required" or tc == "any":
            claude_body["tool_choice"] = {"type": "any"}
        elif tc == "none":
            claude_body["tool_choice"] = {"type": "auto"}
        elif isinstance(tc, dict) and tc.get("type") == "function":
            func_name = tc.get("function", {}).get("name")
            if func_name:
                claude_body["tool_choice"] = {"type": "tool", "name": func_name}

    return claude_body
785
+
786
+
787
def convert_claude_to_openai_response(claude_resp: dict, model: str) -> dict:
    """Convert a Claude messages response to OpenAI chat completion format."""
    created = int(time.time())
    message_id = claude_resp.get("id", "chatcmpl-" + str(created))

    # Split Claude content blocks into plain text and tool invocations.
    text_chunks = []
    tool_calls = []
    for block in claude_resp.get("content", []):
        block_type = block.get("type")
        if block_type == "text":
            text_chunks.append(block.get("text", ""))
        elif block_type == "tool_use":
            tool_calls.append({
                "id": block.get("id", ""),
                "type": "function",
                "function": {
                    "name": block.get("name", ""),
                    "arguments": json.dumps(block.get("input", {})),
                },
            })

    message = {
        "role": "assistant",
        "content": "".join(text_chunks) if text_chunks else None,
    }
    if tool_calls:
        message["tool_calls"] = tool_calls

    # Claude stop_reason -> OpenAI finish_reason (unknown reasons -> "stop").
    reason_map = {
        "end_turn": "stop",
        "max_tokens": "length",
        "stop_sequence": "stop",
        "tool_use": "tool_calls",
    }
    finish_reason = reason_map.get(claude_resp.get("stop_reason", ""), "stop")

    counters = claude_resp.get("usage", {})
    prompt_tokens = counters.get("input_tokens", 0)
    completion_tokens = counters.get("output_tokens", 0)

    return {
        "id": message_id,
        "object": "chat.completion",
        "created": created,
        "model": model,
        "choices": [{
            "index": 0,
            "message": message,
            "finish_reason": finish_reason,
        }],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        },
    }
848
+
849
+
850
async def handle_stream_response_as_openai(stream_url: str, model: str, api_key: str = None, is_gpt: bool = False) -> StreamingResponse:
    """Convert Claude/Codex SSE stream to OpenAI SSE format.

    Opens a GET stream against `stream_url` and re-emits every upstream event
    as an OpenAI `chat.completion.chunk` SSE frame.  When `is_gpt` is True the
    upstream is treated as Responses-API events (response.created,
    response.output_text.delta, ...); otherwise Claude message events
    (message_start, content_block_delta, ...) are translated.
    `api_key`, when given, is forwarded as both a Bearer token and x-api-key.
    """

    async def generate_openai_stream() -> AsyncGenerator[str, None]:
        # Fallback identifiers used when upstream supplies none of its own.
        message_id = f"chatcmpl-{int(time.time())}"
        created = int(time.time())

        # State tracking (only needed for Claude responses)
        content_block_types = {}  # index -> type
        tool_call_index = -1
        role_sent = False

        try:
            # Add agent_id parameter to URL (required by openclaw.aid.pub/responses.aid.pub)
            current_agent = claude_proxy_async.get_current_agent_id()
            if current_agent:
                separator = "&" if "?" in stream_url else "?"
                full_url = f"{stream_url}{separator}agent_id={current_agent.id}"
            else:
                full_url = stream_url

            decoded_url = unquote(full_url)
            logger.info(f"[Stream] Connecting to: {decoded_url}")
            logger.info(f"[Stream] Is GPT model: {is_gpt}")

            # Build headers with API key if provided
            headers = {}
            if api_key:
                headers["Authorization"] = f"Bearer {api_key}"
                headers["x-api-key"] = api_key

            # NOTE(review): verify=False disables TLS verification — presumably the
            # relay uses self-signed certificates; confirm before hardening.
            async with httpx.AsyncClient(verify=False, timeout=httpx.Timeout(30.0, read=300.0)) as client:
                async with client.stream("GET", decoded_url, headers=headers) as response:
                    logger.info(f"[Stream] Response status: {response.status_code}")

                    if response.status_code != 200:
                        error_text = await response.aread()
                        error_msg = error_text.decode('utf-8') if error_text else "Stream request failed"
                        logger.error(f"[Stream] Error response: {error_msg}")
                        yield f'data: {json.dumps({"error": {"message": error_msg, "type": "api_error"}})}\n\n'
                        return

                    # For GPT models, convert responses-style stream to OpenAI chat-completions stream
                    if is_gpt:
                        response_id = None
                        response_created = created
                        role_sent = False
                        tool_call_indices: Dict[str, int] = {}   # call_id -> OpenAI tool_calls index
                        tool_call_names: Dict[str, str] = {}     # call_id -> function name
                        tool_call_arguments: Dict[str, str] = {} # call_id -> accumulated arguments text
                        next_tool_call_index = 0

                        def _emit_role_chunk() -> str:
                            # Emit the initial {"role": "assistant"} delta exactly once.
                            nonlocal role_sent
                            if role_sent:
                                return ""
                            role_sent = True
                            chunk = {
                                "id": response_id or message_id,
                                "object": "chat.completion.chunk",
                                "created": response_created,
                                "model": model,
                                "choices": [{
                                    "index": 0,
                                    "delta": {"role": "assistant", "content": ""},
                                    "finish_reason": None
                                }]
                            }
                            return f"data: {json.dumps(chunk)}\n\n"

                        def _emit_text_chunk(text: str) -> str:
                            # Wrap a text delta in an OpenAI chunk frame.
                            chunk = {
                                "id": response_id or message_id,
                                "object": "chat.completion.chunk",
                                "created": response_created,
                                "model": model,
                                "choices": [{
                                    "index": 0,
                                    "delta": {"content": text},
                                    "finish_reason": None
                                }]
                            }
                            return f"data: {json.dumps(chunk)}\n\n"

                        def _emit_tool_start(call_id: str, name: str, arguments: str = "") -> str:
                            # First chunk for a tool call: assigns a stable index and
                            # records name/arguments seen so far.
                            nonlocal next_tool_call_index
                            if call_id not in tool_call_indices:
                                tool_call_indices[call_id] = next_tool_call_index
                                next_tool_call_index += 1
                            index = tool_call_indices[call_id]
                            safe_name = name or tool_call_names.get(call_id, "")
                            safe_arguments = arguments or tool_call_arguments.get(call_id, "")
                            tool_call_names[call_id] = safe_name
                            tool_call_arguments[call_id] = safe_arguments
                            chunk = {
                                "id": response_id or message_id,
                                "object": "chat.completion.chunk",
                                "created": response_created,
                                "model": model,
                                "choices": [{
                                    "index": 0,
                                    "delta": {
                                        "tool_calls": [{
                                            "index": index,
                                            "id": call_id,
                                            "type": "function",
                                            "function": {"name": safe_name, "arguments": safe_arguments}
                                        }]
                                    },
                                    "finish_reason": None
                                }]
                            }
                            return f"data: {json.dumps(chunk)}\n\n"

                        def _emit_tool_args_delta(call_id: str, arguments_delta: str) -> str:
                            # Incremental arguments chunk; also accumulates the full
                            # argument text so the *.done handler can diff against it.
                            nonlocal next_tool_call_index
                            if call_id not in tool_call_indices:
                                tool_call_indices[call_id] = next_tool_call_index
                                next_tool_call_index += 1
                            index = tool_call_indices[call_id]
                            previous = tool_call_arguments.get(call_id, "")
                            merged = previous + arguments_delta
                            tool_call_arguments[call_id] = merged
                            chunk = {
                                "id": response_id or message_id,
                                "object": "chat.completion.chunk",
                                "created": response_created,
                                "model": model,
                                "choices": [{
                                    "index": 0,
                                    "delta": {
                                        "tool_calls": [{
                                            "index": index,
                                            "function": {"arguments": arguments_delta}
                                        }]
                                    },
                                    "finish_reason": None
                                }]
                            }
                            return f"data: {json.dumps(chunk)}\n\n"

                        def _normalize_arguments(value: Any) -> str:
                            # Arguments may be str, dict/list, or None depending on
                            # which upstream field carried them.
                            if isinstance(value, str):
                                return value
                            if isinstance(value, (dict, list)):
                                return json.dumps(value, ensure_ascii=False)
                            if value is None:
                                return ""
                            return str(value)

                        def _emit_finish_chunk(reason: str, usage_data: dict = None) -> str:
                            # Terminal chunk with finish_reason and optional usage.
                            chunk = {
                                "id": response_id or message_id,
                                "object": "chat.completion.chunk",
                                "created": response_created,
                                "model": model,
                                "choices": [{
                                    "index": 0,
                                    "delta": {},
                                    "finish_reason": reason
                                }]
                            }
                            if usage_data:
                                prompt_tokens = usage_data.get("prompt_tokens", usage_data.get("input_tokens", 0))
                                completion_tokens = usage_data.get("completion_tokens", usage_data.get("output_tokens", 0))
                                chunk["usage"] = {
                                    "prompt_tokens": prompt_tokens,
                                    "completion_tokens": completion_tokens,
                                    "total_tokens": usage_data.get("total_tokens", prompt_tokens + completion_tokens)
                                }
                            return f"data: {json.dumps(chunk)}\n\n"

                        async for line in response.aiter_lines():
                            if not line:
                                continue

                            try:
                                # URL decode the line
                                from urllib.parse import unquote_plus
                                decoded_line = unquote_plus(line)

                                # Skip comments
                                if decoded_line.startswith(":"):
                                    continue

                                # Parse key:value format
                                if ":" not in decoded_line:
                                    continue

                                key, value = decoded_line.split(":", 1)
                                key = key.strip()
                                value = value.strip()

                                # Handle "data: event: xxx" or "data: data: {...}"
                                if key == "data":
                                    if ":" in value:
                                        inner_key, inner_value = value.split(":", 1)
                                        inner_key = inner_key.strip()
                                        inner_value = inner_value.strip()

                                        if inner_key == "data":
                                            try:
                                                event = json.loads(inner_value)
                                            except Exception:
                                                # Non-JSON payload: only "[DONE]" is meaningful.
                                                if inner_value == "[DONE]":
                                                    if role_sent:
                                                        yield _emit_finish_chunk("stop")
                                                    yield "data: [DONE]\n\n"
                                                    return
                                                continue

                                            # Upstream may already send OpenAI chat chunks.
                                            if isinstance(event, dict) and "choices" in event:
                                                yield f"data: {json.dumps(event)}\n\n"
                                                continue

                                            event_type = event.get("type")
                                            if event_type == "response.created":
                                                # Adopt upstream response id/timestamp.
                                                response_info = event.get("response", {})
                                                response_id = response_info.get("id", response_id or message_id)
                                                response_created = _to_unix_seconds(
                                                    response_info.get("created_at"),
                                                    response_created
                                                )
                                            elif event_type in ("response.output_text.delta", "response.output_text.annotation.added"):
                                                role_chunk = _emit_role_chunk()
                                                if role_chunk:
                                                    yield role_chunk
                                                delta = event.get("delta", "")
                                                if delta:
                                                    yield _emit_text_chunk(delta)
                                            elif event_type == "response.output_item.added":
                                                item = event.get("item", {})
                                                if item.get("type") in ("function_call", "tool_call"):
                                                    role_chunk = _emit_role_chunk()
                                                    if role_chunk:
                                                        yield role_chunk
                                                    call_id = item.get("call_id", item.get("id", "")) or f"call_{next_tool_call_index}"
                                                    name = item.get("name", "")
                                                    arguments_value = item.get("arguments", item.get("arguments_json", item.get("input", "")))
                                                    arguments_text = _normalize_arguments(arguments_value)
                                                    tool_call_names[call_id] = name or tool_call_names.get(call_id, "")
                                                    if arguments_text:
                                                        tool_call_arguments[call_id] = arguments_text
                                                    yield _emit_tool_start(
                                                        call_id=call_id,
                                                        name=name,
                                                        arguments=arguments_text
                                                    )
                                            elif event_type in ("response.function_call_arguments.delta", "response.output_item.delta"):
                                                role_chunk = _emit_role_chunk()
                                                if role_chunk:
                                                    yield role_chunk
                                                call_id = event.get("call_id")
                                                if not call_id and isinstance(event.get("item"), dict):
                                                    call_id = event["item"].get("call_id", event["item"].get("id", ""))
                                                if not call_id:
                                                    continue
                                                delta = event.get("delta", "")
                                                if isinstance(delta, dict):
                                                    delta = (
                                                        delta.get("arguments")
                                                        or delta.get("arguments_delta")
                                                        or delta.get("partial_json")
                                                        or ""
                                                    )
                                                if delta:
                                                    yield _emit_tool_args_delta(call_id, str(delta))
                                            elif event_type in ("response.function_call_arguments.done", "response.output_item.done"):
                                                item = event.get("item", {})
                                                call_id = event.get("call_id")
                                                if not call_id and isinstance(item, dict):
                                                    call_id = item.get("call_id", item.get("id", ""))
                                                if not call_id:
                                                    continue
                                                full_args = _normalize_arguments(
                                                    event.get("arguments")
                                                    or (item.get("arguments") if isinstance(item, dict) else None)
                                                    or (item.get("arguments_json") if isinstance(item, dict) else None)
                                                    or (item.get("input") if isinstance(item, dict) else None)
                                                )
                                                if full_args:
                                                    previous = tool_call_arguments.get(call_id, "")
                                                    if not previous:
                                                        # If we never got deltas, emit once so client receives arguments.
                                                        yield _emit_tool_args_delta(call_id, full_args)
                                                    elif full_args != previous and full_args.startswith(previous):
                                                        # Emit missing suffix if done payload is more complete.
                                                        suffix = full_args[len(previous):]
                                                        if suffix:
                                                            yield _emit_tool_args_delta(call_id, suffix)
                                                    elif full_args != previous:
                                                        # Fallback: replace mismatch by sending full args once more.
                                                        yield _emit_tool_args_delta(call_id, full_args)
                                            elif event_type == "response.completed":
                                                response_info = event.get("response", {})
                                                status = response_info.get("status", "completed")
                                                usage_data = response_info.get("usage", {})
                                                finish_reason = "stop"
                                                if status in ("incomplete", "failed", "cancelled"):
                                                    finish_reason = "length"
                                                elif tool_call_indices:
                                                    finish_reason = "tool_calls"
                                                yield _emit_finish_chunk(finish_reason, usage_data)
                                                yield "data: [DONE]\n\n"
                                                return
                                            elif event_type == "response.error":
                                                error_info = event.get("error", {})
                                                error_message = error_info.get("message", "Response stream error")
                                                yield f'data: {json.dumps({"error": {"message": error_message, "type": "api_error"}})}\n\n'
                                                yield "data: [DONE]\n\n"
                                                return

                                elif key == "event":
                                    # Handle "event: done"
                                    if value == "done":
                                        if role_sent:
                                            yield _emit_finish_chunk("stop")
                                        yield "data: [DONE]\n\n"
                                        return

                            except Exception as e:
                                logger.warning(f"Failed to process SSE line (GPT): {e}")
                                continue

                    else:
                        # Process URL-encoded SSE stream from openclaw.aid.pub (Claude format)
                        # Format: "data: event: xxx" and "data: data: {...}"
                        current_event = None

                        async for line in response.aiter_lines():
                            if not line:
                                continue

                            try:
                                # URL decode the line (openclaw.aid.pub uses URL encoding)
                                from urllib.parse import unquote_plus
                                decoded_line = unquote_plus(line)

                                # Skip comments
                                if decoded_line.startswith(":"):
                                    continue

                                # Parse key:value format
                                if ":" not in decoded_line:
                                    continue

                                key, value = decoded_line.split(":", 1)
                                key = key.strip()
                                value = value.strip()

                                # Handle "data: event: xxx" or "data: data: {...}"
                                if key == "data":
                                    if ":" in value:
                                        inner_key, inner_value = value.split(":", 1)
                                        inner_key = inner_key.strip()
                                        inner_value = inner_value.strip()

                                        if inner_key == "event":
                                            # Cache event type
                                            current_event = inner_value
                                        elif inner_key == "data":
                                            # Got data line, process the Claude event
                                            if current_event:
                                                try:
                                                    event = json.loads(inner_value)
                                                    event_type = event.get("type")

                                                    if event_type == "message_start":
                                                        # Send role
                                                        chunk = {
                                                            "id": message_id,
                                                            "object": "chat.completion.chunk",
                                                            "created": created,
                                                            "model": model,
                                                            "choices": [{
                                                                "index": 0,
                                                                "delta": {"role": "assistant", "content": ""},
                                                                "finish_reason": None
                                                            }]
                                                        }
                                                        yield f"data: {json.dumps(chunk)}\n\n"
                                                        role_sent = True

                                                    elif event_type == "content_block_start":
                                                        block_index = event.get("index", 0)
                                                        content_block = event.get("content_block", {})
                                                        block_type = content_block.get("type")
                                                        content_block_types[block_index] = block_type

                                                        if block_type == "tool_use":
                                                            # New tool call: advance the OpenAI tool index.
                                                            tool_call_index += 1
                                                            chunk = {
                                                                "id": message_id,
                                                                "object": "chat.completion.chunk",
                                                                "created": created,
                                                                "model": model,
                                                                "choices": [{
                                                                    "index": 0,
                                                                    "delta": {
                                                                        "tool_calls": [{
                                                                            "index": tool_call_index,
                                                                            "id": content_block.get("id", ""),
                                                                            "type": "function",
                                                                            "function": {
                                                                                "name": content_block.get("name", ""),
                                                                                "arguments": ""
                                                                            }
                                                                        }]
                                                                    },
                                                                    "finish_reason": None
                                                                }]
                                                            }
                                                            yield f"data: {json.dumps(chunk)}\n\n"

                                                    elif event_type == "content_block_delta":
                                                        delta = event.get("delta", {})
                                                        delta_type = delta.get("type")

                                                        if delta_type == "text_delta":
                                                            text = delta.get("text", "")
                                                            chunk = {
                                                                "id": message_id,
                                                                "object": "chat.completion.chunk",
                                                                "created": created,
                                                                "model": model,
                                                                "choices": [{
                                                                    "index": 0,
                                                                    "delta": {"content": text},
                                                                    "finish_reason": None
                                                                }]
                                                            }
                                                            yield f"data: {json.dumps(chunk)}\n\n"

                                                        elif delta_type == "input_json_delta":
                                                            # Partial tool arguments JSON.
                                                            partial_json = delta.get("partial_json", "")
                                                            chunk = {
                                                                "id": message_id,
                                                                "object": "chat.completion.chunk",
                                                                "created": created,
                                                                "model": model,
                                                                "choices": [{
                                                                    "index": 0,
                                                                    "delta": {
                                                                        "tool_calls": [{
                                                                            "index": tool_call_index,
                                                                            "function": {
                                                                                "arguments": partial_json
                                                                            }
                                                                        }]
                                                                    },
                                                                    "finish_reason": None
                                                                }]
                                                            }
                                                            yield f"data: {json.dumps(chunk)}\n\n"

                                                    elif event_type == "message_delta":
                                                        # Send finish_reason and usage
                                                        stop_reason = event.get("delta", {}).get("stop_reason", "")
                                                        finish_reason_map = {
                                                            "end_turn": "stop",
                                                            "max_tokens": "length",
                                                            "stop_sequence": "stop",
                                                            "tool_use": "tool_calls"
                                                        }
                                                        finish_reason = finish_reason_map.get(stop_reason, "stop")

                                                        usage_data = event.get("usage", {})
                                                        chunk = {
                                                            "id": message_id,
                                                            "object": "chat.completion.chunk",
                                                            "created": created,
                                                            "model": model,
                                                            "choices": [{
                                                                "index": 0,
                                                                "delta": {},
                                                                "finish_reason": finish_reason
                                                            }],
                                                            "usage": {
                                                                "prompt_tokens": usage_data.get("input_tokens", 0),
                                                                "completion_tokens": usage_data.get("output_tokens", 0),
                                                                "total_tokens": usage_data.get("input_tokens", 0) + usage_data.get("output_tokens", 0)
                                                            }
                                                        }
                                                        yield f"data: {json.dumps(chunk)}\n\n"

                                                    elif event_type == "message_stop":
                                                        yield "data: [DONE]\n\n"

                                                    elif event_type == "ping":
                                                        pass  # Ignore ping events

                                                except json.JSONDecodeError:
                                                    logger.warning(f"Failed to parse SSE data: {inner_value[:100]}")

                                            # Reset cached event type after consuming a data line.
                                            current_event = None

                                elif key == "event":
                                    # Handle "event: done"
                                    if value == "done":
                                        yield "data: [DONE]\n\n"
                                        return

                            except Exception as e:
                                logger.warning(f"Failed to process SSE line: {e}")
                                continue

        except Exception as e:
            logger.error(f"Stream error: {e}")
            yield f'data: {json.dumps({"error": {"message": str(e), "type": "api_error"}})}\n\n'

    return StreamingResponse(generate_openai_stream(), media_type="text/event-stream")
1361
+
1362
+
1363
def get_openclaw_proxy_config():
    """Build the ProxyConfig for routing OpenClaw requests.

    Wires the OpenAI<->Claude request/response converters, the GPT-style
    streaming handler and the model validator into the shared proxy-engine
    configuration targeting openclaw.aid.pub.
    """
    # Imported lazily to avoid a circular import at module load — TODO confirm.
    from .proxy_config import ProxyConfig

    return ProxyConfig(
        # Target AID is fixed regardless of the caller's API key.
        target_aid_getter=lambda api_key: "openclaw.aid.pub",
        request_converter=convert_openai_to_claude_request,
        # Only convert bodies that look like Claude responses (carry "content");
        # anything else is passed through unchanged.
        response_converter=lambda resp: convert_claude_to_openai_response(resp, resp.get("model", "gpt-4o")) if isinstance(resp, dict) and "content" in resp else resp,
        stream_handler=lambda url: handle_stream_response_as_openai(url, "gpt-4o", None, True),
        error_formatter=lambda e: openai_error_response(str(e), "api_error", status_code=500),
        model_validator=is_model_supported,
        proxy_type="openclaw"
    )
1376
+
1377
+
1378
async def proxy_openclaw_request(request: Request) -> Response:
    """Main proxy handler for OpenClaw requests (unified-engine entry point).

    Currently delegates to the legacy implementation because OpenClaw's
    session management (target_aid) has not yet been ported to the unified
    proxy engine.
    """
    # Note: OpenClaw has complex session management with target_aid
    # For now, keep using legacy implementation
    # TODO: Fully integrate with proxy_engine after testing
    return await proxy_openclaw_request_legacy(request)
1384
+
1385
+
1386
async def proxy_openclaw_request_legacy(request: Request) -> Response:
    """Main proxy handler for OpenClaw requests - original implementation (backup).

    Flow: validate agent readiness, auth header and model; convert the
    OpenAI-style body to the upstream format (Claude ``/v1/messages`` or
    GPT/Codex ``/responses``); relay it over an AgentCP session to
    ``openclaw.aid.pub``; wait for the reply correlated by ``trace_id`` and
    convert it back to the OpenAI response shape (JSON or SSE stream).

    Args:
        request: Incoming FastAPI request with an OpenAI-compatible
            chat-completions payload and a ``Bearer`` API key.

    Returns:
        A JSONResponse/StreamingResponse in OpenAI format, or an OpenAI-style
        error response on failure (503 when the agent/session is unavailable,
        504 on timeout, 401 on missing auth, etc.).
    """
    logger.debug("[OpenClaw] proxy_openclaw_request called")
    shared_agent_id = claude_proxy_async.get_current_agent_id()
    logger.debug(f"[OpenClaw] shared agent_id is None: {shared_agent_id is None}")

    # Check if AgentCP is ready (shared with claude proxy)
    if not shared_agent_id:
        logger.error("[OpenClaw] AgentID not initialized - returning error")
        return openai_error_response(
            "AgentCP not initialized. Please ensure the agent is connected.",
            error_type="service_unavailable",
            status_code=503
        )

    # Parse request body
    logger.debug("[OpenClaw] Parsing request body...")
    try:
        body = await request.json()
        logger.debug(f"[OpenClaw] Body parsed, model: {body.get('model', 'N/A')}")
    except Exception as e:
        logger.error(f"[OpenClaw] JSON parse error: {e}")
        return openai_error_response(f"Invalid JSON: {str(e)}")

    # Get model
    model = body.get("model", "")
    logger.debug(f"[OpenClaw] Model: {model}")
    if not model:
        return openai_error_response("Missing required parameter: model")

    # Check if model is supported
    logger.debug(f"[OpenClaw] Checking if model is supported: {model}")
    if not is_model_supported(model):
        logger.debug(f"[OpenClaw] Model not supported: {model}")
        return openai_error_response(
            f"Model not supported: {model}",
            error_type="invalid_request_error"
        )
    logger.debug("[OpenClaw] Model is supported")

    # Get API key from header - this is the user's API key (evol-xxx format).
    # It will be used as both the user key and Claude API key (transparent pass-through).
    logger.debug("[OpenClaw] Getting API key from header...")
    auth_header = request.headers.get("authorization", "")
    if not auth_header.startswith("Bearer "):
        logger.debug("[OpenClaw] Missing or invalid authorization header")
        return openai_error_response("Missing or invalid authorization header", status_code=401)

    user_api_key = auth_header[7:]  # Extract the API key (e.g., evol-xxx)
    logger.debug(f"[OpenClaw] API key extracted: {user_api_key[:10]}...")

    # Determine if this is a GPT/Codex model or Claude model.
    # Also check the request path - /codex-proxy/ indicates GPT model.
    request_path = str(request.url.path)
    is_codex_path = "/codex-proxy/" in request_path
    is_gpt = is_gpt_model(model) or is_codex_path
    logger.debug(f"[OpenClaw] is_gpt: {is_gpt}, request_path: {request_path}")

    # Convert OpenAI request to the appropriate upstream format
    logger.debug("[OpenClaw] Converting request format...")
    try:
        if is_gpt:
            # Convert to responses format payload
            request_body = convert_openai_to_response_format(body)
            logger.debug(f"[OpenClaw] Converted to GPT/Codex format, model: {request_body.get('model')}")
        else:
            # Convert to Claude format
            request_body = convert_openai_to_claude_request(body)
            logger.debug(f"[OpenClaw] Converted to Claude format, model: {request_body.get('model')}")
    except Exception as e:
        logger.error(f"[OpenClaw] Request conversion error: {e}")
        return openai_error_response(f"Request conversion failed: {str(e)}")

    # Build proxy message with complete headers (same format as claude-proxy).
    # trace_id correlates this request with the async reply from the upstream agent.
    trace_id = f"openclaw-{int(time.time() * 1000)}"
    logger.debug(f"[OpenClaw] trace_id: {trace_id}")

    # Construct headers following the reference format.
    # The user's API key is transparently passed through as both authorization and x-api-key.
    proxy_headers = {
        "connection": "keep-alive",
        "accept": "application/json",
        "x-stainless-retry-count": "0",
        "x-stainless-timeout": "600",
        "x-stainless-lang": "js",
        "x-stainless-package-version": "0.74.0",
        "x-stainless-os": "Windows",
        "x-stainless-arch": "x64",
        "x-stainless-runtime": "node",
        "x-stainless-runtime-version": "v22.20.0",
        "anthropic-dangerous-direct-browser-access": "true",
        "anthropic-version": "2023-06-01",
        "x-app": "cli",
        "user-agent": "claude-cli/2.1.63 (external, openclaw-proxy, evol-electron-app)",
        "authorization": f"Bearer {user_api_key}",  # User's API key (lowercase)
        "content-type": "application/json",
        "anthropic-beta": "claude-code-20250219,interleaved-thinking-2025-05-14,prompt-caching-scope-2026-01-05,effort-2025-11-24,adaptive-thinking-2026-01-28",
        "accept-language": "*",
        "accept-encoding": "gzip, deflate",
        "Authorization": f"Bearer {user_api_key}",  # Same key (uppercase for compatibility)
        "x-api-key": user_api_key  # Same key for x-api-key header
    }

    # Determine the API path based on model type.
    # For GPT models, use responses endpoint (without /v1 prefix).
    if is_gpt:
        # GPT models use responses endpoint
        api_path = "/responses"
    else:
        # Claude uses /v1/messages
        api_path = "/v1/messages"

    logger.debug(f"[OpenClaw] API path: {api_path}, Is GPT: {is_gpt}")

    # Build proxy message - all requests go to openclaw.aid.pub.
    # Use a different type for GPT vs Claude so openclaw.aid.pub can route correctly.
    proxy_message = {
        "type": "codex_proxy" if is_gpt else "claude_proxy",
        "status": "success",
        "timestamp": int(time.time() * 1000),
        "trace_id": trace_id,
        "content": {
            "path": api_path,
            "method": "POST",
            "headers": proxy_headers,
            "body": request_body,  # Contains responses-style payload for GPT or Claude messages format
            "source": "evol",
            "version": "1.0.0"
        }
    }
    logger.debug(f"[OpenClaw] Proxy message built, type: {proxy_message['type']}")

    # Send to AgentCP using the shared claude-proxy session manager.
    # async_session_manager must be visible to the finally-block cleanup below.
    async_session_manager = None
    try:
        # Shared manager debug
        logger.debug(f"[OpenClaw] openclaw_agentId(legacy): {openclaw_agentId.id if openclaw_agentId else 'None'}")
        logger.debug(f"[OpenClaw] shared_agent_id: {shared_agent_id.id if shared_agent_id else 'None'}")
        async_session_manager = await claude_proxy_async.get_async_session_manager(shared_agent_id)
        logger.debug(f"[OpenClaw] shared_session_manager: {async_session_manager}")

        # Register response event so the manager's receive path can wake us up
        # when a message with this trace_id arrives.
        logger.debug(f"[OpenClaw] Registering response event for trace_id: {trace_id}")
        response_event = asyncio.Event()
        async_session_manager._pending_requests[trace_id] = response_event
        async_session_manager.register_request_timestamp(trace_id)
        logger.debug("[OpenClaw] Response event registered")

        # Get (or create) a session scoped to this upstream agent + user key
        logger.debug("[OpenClaw] Getting session...")
        agent_name = "openclaw.aid.pub"
        session_scope_key = f"{agent_name}:{user_api_key}"
        session_id = await async_session_manager.get_session(
            user_api_key,
            shared_agent_id,
            target_aid=agent_name,
            session_name="openclaw_proxy",
            session_subject="openclaw_proxy",
            session_scope_key=session_scope_key,
        )
        if not session_id:
            logger.debug("[OpenClaw] Failed to get session")
            return openai_error_response("Failed to get AgentCP session", error_type="api_error", status_code=503)
        logger.debug(f"[OpenClaw] Got session_id: {session_id}")

        # Best-effort session detail diagnostics (never fail the request)
        try:
            session_data = async_session_manager._session_info.get(session_id, {})
            session_target = session_data.get("target_aid")
            session_key = session_data.get("session_key")
            session_last_used = session_data.get("last_used")
            now_ts = time.time()
            idle_seconds = (now_ts - session_last_used) if isinstance(session_last_used, (int, float)) else None
            # Format idle time explicitly instead of the previous fragile
            # string-concatenation + ternary inside the debug call.
            idle_repr = f"{idle_seconds:.2f}" if idle_seconds is not None else "None"
            logger.debug(
                f"[OpenClawDiag] Session detail: session_id={session_id}, "
                f"target_aid={session_target}, session_key={session_key}, "
                f"idle_seconds={idle_repr}"
            )
        except Exception as inspect_err:
            logger.debug(f"[OpenClawDiag] Session detail inspect failed: {inspect_err}")
        logger.debug(
            f"[OpenClaw] Session state: handler_registered={session_id in async_session_manager._handler_registered}, "
            f"pending_count={len(async_session_manager._pending_requests)}"
        )

        # All requests go to openclaw.aid.pub
        logger.debug(f"[OpenClaw] Sending message to {agent_name}...")

        # ===== Pre-send diagnostics: inspect SDK internal state =====
        try:
            # 1. Check whether the session exists in the SDK session_manager
            sm = getattr(shared_agent_id, 'session_manager', None)
            sm_sessions = getattr(sm, 'sessions', {}) if sm else {}
            session_exists_in_sm = session_id in sm_sessions
            sm_session_count = len(sm_sessions)

            # 2. Check the message_client_map
            mc_map = getattr(sm, 'message_client_map', {}) if sm else {}
            mc_count = len(mc_map)

            # 3. Check handler registration state
            handler_map = getattr(shared_agent_id, 'message_handlers_session_map', {})
            handler_bound = session_id in handler_map
            handler_map_size = len(handler_map)
            global_handlers_count = len(getattr(shared_agent_id, 'message_handlers', []))

            # 4. Check WebSocket connection state
            ws_status = "unknown"
            mc_connection_id = "unknown"
            if sm and sm_sessions.get(session_id):
                sess_obj = sm_sessions[session_id]
                mc = getattr(sess_obj, 'message_client', None)
                if mc:
                    ws_status = f"ws_open={mc._is_ws_open()}, state={getattr(mc, '_connection_state', 'N/A')}"
                    mc_connection_id = getattr(mc, '_connection_id', 'N/A')
            elif mc_map:
                # Session may not be in sessions yet; inspect message_client_map instead
                for url, mc in mc_map.items():
                    ws_status = f"ws_open={mc._is_ws_open()}, state={getattr(mc, '_connection_state', 'N/A')}"
                    mc_connection_id = getattr(mc, '_connection_id', 'N/A')
                    break

            logger.debug(
                f"[OpenClawDiag] PRE-SEND: session_exists_in_sm={session_exists_in_sm}, "
                f"sm_session_count={sm_session_count}, mc_count={mc_count}, "
                f"handler_bound={handler_bound}, handler_map_size={handler_map_size}, "
                f"global_handlers={global_handlers_count}, "
                f"ws={ws_status}, mc_conn_id={mc_connection_id}"
            )
        except Exception as diag_err:
            logger.debug(f"[OpenClawDiag] PRE-SEND inspect failed: {diag_err}")

        # send_message is a blocking SDK call; run it off the event loop
        send_result = await asyncio.to_thread(
            shared_agent_id.send_message, session_id, [agent_name], proxy_message
        )

        # ===== Post-send diagnostics =====
        try:
            # Re-check whether the session was re-created by the send
            sm = getattr(shared_agent_id, 'session_manager', None)
            sm_sessions = getattr(sm, 'sessions', {}) if sm else {}
            session_exists_after = session_id in sm_sessions

            # Check whether the handler is still bound
            handler_map = getattr(shared_agent_id, 'message_handlers_session_map', {})
            handler_bound_after = session_id in handler_map

            logger.debug(
                f"[OpenClawDiag] POST-SEND: send_result={send_result}, "
                f"session_exists_after={session_exists_after}, "
                f"handler_bound_after={handler_bound_after}, "
                f"handler_map_keys={list(handler_map.keys())[:5]}"
            )
        except Exception as diag_err:
            logger.debug(f"[OpenClawDiag] POST-SEND inspect failed: {diag_err}")

        logger.debug(
            f"[OpenClawDiag] send_message done: session_id={session_id}, target={agent_name}, "
            f"trace_id={trace_id}"
        )

        # Wait for response (300s total timeout) with early diagnostic polling
        try:
            # For the first 10s poll every 2s so we can log whether the
            # message/dispatch pipeline is making progress.
            for _poll_i in range(5):
                try:
                    await asyncio.wait_for(response_event.wait(), timeout=2)
                    logger.debug(f"[OpenClaw] Response received (within {(_poll_i+1)*2}s)")
                    break
                except asyncio.TimeoutError:
                    # Not received yet - emit diagnostics, then keep polling
                    try:
                        sm = getattr(shared_agent_id, 'session_manager', None)
                        sm_sessions = getattr(sm, 'sessions', {}) if sm else {}
                        handler_map = getattr(shared_agent_id, 'message_handlers_session_map', {})
                        # Dispatch queue state
                        dq = getattr(shared_agent_id, 'message_dispatch_queue', None)
                        dq_size = dq.qsize() if dq else -1
                        # SDK metrics
                        metrics = getattr(shared_agent_id, 'metrics', None)
                        received = getattr(metrics, 'received_total', -1) if metrics else -1
                        dispatched = getattr(metrics, 'dispatched_total', -1) if metrics else -1
                        logger.debug(
                            f"[OpenClawDiag] POLL {(_poll_i+1)*2}s: event_set={response_event.is_set()}, "
                            f"result_exists={trace_id in async_session_manager._request_result_map}, "
                            f"dispatch_queue_size={dq_size}, "
                            f"sm_session_exists={session_id in sm_sessions}, "
                            f"handler_bound={session_id in handler_map}, "
                            f"metrics_received={received}, metrics_dispatched={dispatched}"
                        )
                    except Exception as poll_err:
                        logger.debug(f"[OpenClawDiag] POLL {(_poll_i+1)*2}s inspect failed: {poll_err}")
                    continue
            else:
                # Nothing within the first 10s - switch to one long wait
                logger.debug(f"[OpenClawDiag] 10s内未收到响应,进入长等待 (剩余290s)...")
                await asyncio.wait_for(response_event.wait(), timeout=290)
                logger.debug(f"[OpenClaw] Response received (after >10s)")
        except asyncio.TimeoutError:
            pending_keys = list(async_session_manager._pending_requests.keys())
            result_keys = list(async_session_manager._request_result_map.keys())
            logger.debug(
                f"[OpenClaw] Request timeout, trace_id={trace_id}, "
                f"pending_count={len(pending_keys)}, result_count={len(result_keys)}"
            )
            # ===== Full timeout diagnostics =====
            try:
                handler_map = getattr(shared_agent_id, "message_handlers_session_map", None)
                if isinstance(handler_map, dict):
                    logger.debug(
                        f"[OpenClaw] Timeout handler-map: map_size={len(handler_map)}, "
                        f"session_bound={session_id in handler_map}"
                    )
                    if session_id in handler_map:
                        try:
                            handlers_for_session = handler_map.get(session_id, [])
                            handlers_count = len(handlers_for_session) if hasattr(handlers_for_session, "__len__") else -1
                            logger.debug(
                                f"[OpenClawDiag] Timeout handler detail: session_id={session_id}, "
                                f"handlers_count={handlers_count}"
                            )
                        except Exception as h_err:
                            logger.debug(f"[OpenClawDiag] Timeout handler detail inspect failed: {h_err}")
            except Exception as inspect_err:
                logger.debug(f"[OpenClaw] Timeout handler-map inspect failed: {inspect_err}")

            # SDK internal connection + thread state
            try:
                sm = getattr(shared_agent_id, 'session_manager', None)
                sm_sessions = getattr(sm, 'sessions', {}) if sm else {}
                mc_map = getattr(sm, 'message_client_map', {}) if sm else {}

                # WebSocket connection state per message client
                for url, mc in mc_map.items():
                    ws_open = mc._is_ws_open()
                    conn_state = getattr(mc, '_connection_state', 'N/A')
                    conn_id = getattr(mc, '_connection_id', 'N/A')
                    ws_thread = getattr(mc, 'ws_thread', None)
                    ws_thread_alive = ws_thread.is_alive() if ws_thread else False
                    pending_q = getattr(mc, '_pending_messages', None)
                    pending_q_size = pending_q.qsize() if pending_q and hasattr(pending_q, 'qsize') else -1
                    logger.debug(
                        f"[OpenClawDiag] Timeout MC[{url[:50]}]: ws_open={ws_open}, "
                        f"state={conn_state}, conn_id={conn_id}, "
                        f"ws_thread_alive={ws_thread_alive}, pending_q_size={pending_q_size}"
                    )

                # Dispatcher thread state
                dispatcher_thread = getattr(shared_agent_id, '_message_dispatcher_thread', None)
                dispatcher_alive = dispatcher_thread.is_alive() if dispatcher_thread else False
                dq = getattr(shared_agent_id, 'message_dispatch_queue', None)
                dq_size = dq.qsize() if dq else -1
                metrics = getattr(shared_agent_id, 'metrics', None)
                received = getattr(metrics, 'received_total', -1) if metrics else -1
                dispatched = getattr(metrics, 'dispatched_total', -1) if metrics else -1
                dispatch_fail = getattr(metrics, 'dispatch_failures', -1) if metrics else -1

                logger.debug(
                    f"[OpenClawDiag] Timeout dispatcher: alive={dispatcher_alive}, "
                    f"queue_size={dq_size}, received={received}, "
                    f"dispatched={dispatched}, dispatch_fail={dispatch_fail}"
                )

                # List of session-manager sessions
                sm_session_ids = list(sm_sessions.keys())[:10]
                logger.debug(f"[OpenClawDiag] Timeout SM sessions: {sm_session_ids}")
            except Exception as sdk_err:
                logger.debug(f"[OpenClawDiag] Timeout SDK inspect failed: {sdk_err}")

            openclaw_pending = [k for k in async_session_manager._pending_requests.keys() if str(k).startswith("openclaw-")]
            openclaw_results = [k for k in async_session_manager._request_result_map.keys() if str(k).startswith("openclaw-")]
            logger.debug(
                f"[OpenClawDiag] Timeout openclaw maps: pending={len(openclaw_pending)}, "
                f"results={len(openclaw_results)}, pending_sample={openclaw_pending[:10]}, "
                f"results_sample={openclaw_results[:10]}"
            )
            return openai_error_response("Request timeout", error_type="timeout", status_code=504)

        # Get result from result_map
        logger.debug("[OpenClaw] Getting result from result_map...")
        result_data = async_session_manager._request_result_map.get(trace_id)
        logger.debug(
            f"[OpenClawDiag] result_map lookup: trace_id={trace_id}, "
            f"found={result_data is not None}, pending_has_trace={trace_id in async_session_manager._pending_requests}, "
            f"result_has_trace={trace_id in async_session_manager._request_result_map}"
        )
        if not result_data:
            logger.debug("[OpenClaw] No response received")
            openclaw_results = [k for k in async_session_manager._request_result_map.keys() if str(k).startswith("openclaw-")]
            logger.debug(
                f"[OpenClawDiag] No response map status: trace_id={trace_id}, "
                f"openclaw_result_count={len(openclaw_results)}, sample={openclaw_results[:10]}"
            )
            return openai_error_response("No response received", error_type="api_error", status_code=503)

        result_type = result_data.get("result_type", "")
        response_msg = result_data.get("result", {})

        logger.debug(f"[OpenClaw] Result type: {result_type}")
        logger.debug(f"[OpenClaw] Response message keys: {response_msg.keys() if isinstance(response_msg, dict) else 'not a dict'}")
        logger.debug(f"[OpenClawDiag] Raw result_data: {result_data}")

        # Handle error response
        if result_type == "error":
            error_message, http_status = normalize_openclaw_error_message(response_msg)

            logger.error(f"[Proxy] Error response: {error_message}, status: {http_status}")
            logger.error(
                f"[OpenClawDiag] Returning error response: trace_id={trace_id}, "
                f"message={error_message}, status={http_status}, raw_response_msg={response_msg}"
            )

            # Check if it's a service-offline error and provide a user-friendly message.
            # BUG FIX: an unconditional `return` used to sit before this check,
            # making it unreachable dead code; the friendly message was never shown.
            if "不在线" in error_message or "offline" in error_message.lower():
                error_message = "模型服务维护中,请稍后重试"
                http_status = 503

            return openai_error_response(error_message, error_type="service_unavailable", status_code=http_status)

        # Handle streaming response
        if result_type == "text/event-stream":
            content = response_msg.get("content", {}) if isinstance(response_msg, dict) else response_msg
            stream_url = content if isinstance(content, str) else content.get("url", "")

            logger.info(f"[Stream] Extracted stream URL: {stream_url}")
            logger.info(f"[Stream] Response message: {response_msg}")

            if stream_url:
                return await handle_stream_response_as_openai(stream_url, model, user_api_key, is_gpt)
            else:
                return openai_error_response("Stream URL missing", error_type="api_error", status_code=500)

        # Handle normal response: unwrap content.body (may arrive JSON-encoded)
        content = response_msg.get("content", {}) if isinstance(response_msg, dict) else response_msg
        if isinstance(content, dict):
            response_body = content.get("body", {})
            if isinstance(response_body, str):
                try:
                    response_body = json.loads(response_body)
                except Exception:
                    # Best effort: leave the raw string for the converter to handle
                    pass
        else:
            response_body = content

        # For GPT models, convert responses format to OpenAI /v1/chat/completions format
        if is_gpt:
            try:
                openai_response = convert_response_to_openai_format(response_body, model)
                return JSONResponse(openai_response)
            except Exception as e:
                logger.error(f"Response conversion error (GPT): {e}")
                return openai_error_response(f"Response conversion failed: {str(e)}", error_type="api_error", status_code=500)

        # Convert Claude response to OpenAI format
        try:
            openai_response = convert_claude_to_openai_response(response_body, model)
            return JSONResponse(openai_response)
        except Exception as e:
            logger.error(f"Response conversion error: {e}")
            return openai_error_response(f"Response conversion failed: {str(e)}", error_type="api_error", status_code=500)

    except Exception as e:
        logger.error(f"Proxy error: {e}")
        import traceback
        logger.debug(traceback.format_exc())
        return openai_error_response(f"Proxy error: {str(e)}", error_type="api_error", status_code=500)
    finally:
        # Cleanup: always drop the pending-event and result entries for this
        # trace_id so the shared maps do not grow without bound.
        try:
            if async_session_manager is not None:
                async_session_manager._pending_requests.pop(trace_id, None)
                async_session_manager._request_result_map.pop(trace_id, None)
        except Exception:
            pass