anthropic-bridge 0.1.3__tar.gz → 0.1.16__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/PKG-INFO +1 -1
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/README.md +2 -3
- anthropic_bridge-0.1.16/anthropic_bridge/providers/codex.py +662 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/pyproject.toml +1 -1
- anthropic_bridge-0.1.3/anthropic_bridge/providers/codex.py +0 -317
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/.github/workflows/cd.yml +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/.github/workflows/ci.yml +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/.gitignore +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/CLAUDE.md +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/LICENSE +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/__init__.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/__main__.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/cache.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/client.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/models.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/__init__.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/base.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/deepseek.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/gemini.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/grok.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/minimax.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/openai.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/qwen.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/providers/registry.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/server.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/anthropic_bridge/transform.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/tests/__init__.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/tests/test_integration.py +0 -0
- {anthropic_bridge-0.1.3 → anthropic_bridge-0.1.16}/uv.lock +0 -0
|
@@ -11,7 +11,7 @@ A proxy server that exposes an Anthropic Messages API-compatible endpoint while
|
|
|
11
11
|
- Support for multiple providers: Gemini, OpenAI, Grok, DeepSeek, Qwen, MiniMax
|
|
12
12
|
- Extended thinking/reasoning support for compatible models
|
|
13
13
|
- Reasoning cache for Gemini models across tool call rounds
|
|
14
|
-
-
|
|
14
|
+
- Codex CLI integration - Use OpenAI's Codex models with your ChatGPT subscription
|
|
15
15
|
|
|
16
16
|
## Installation
|
|
17
17
|
|
|
@@ -98,7 +98,6 @@ Append reasoning level suffix to control reasoning effort:
|
|
|
98
98
|
| `codex/gpt-5.2-codex:high` | High reasoning effort |
|
|
99
99
|
| `codex/gpt-5.2-codex:xhigh` | Extra high reasoning effort |
|
|
100
100
|
| `codex/gpt-5.2` | GPT-5.2 base model |
|
|
101
|
-
| `codex/o3` | O3 model |
|
|
102
101
|
|
|
103
102
|
## API Endpoints
|
|
104
103
|
|
|
@@ -129,7 +128,7 @@ Append reasoning level suffix to control reasoning effort:
|
|
|
129
128
|
|
|
130
129
|
### Codex CLI (via ChatGPT subscription)
|
|
131
130
|
|
|
132
|
-
- **Codex** (`codex/*`) - GPT-5.2, GPT-5.2-Codex
|
|
131
|
+
- **Codex** (`codex/*`) - GPT-5.2, GPT-5.2-Codex with reasoning levels
|
|
133
132
|
|
|
134
133
|
### OpenRouter
|
|
135
134
|
|
|
@@ -0,0 +1,662 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import contextlib
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
import os
|
|
6
|
+
import pty
|
|
7
|
+
import random
|
|
8
|
+
import string
|
|
9
|
+
import time
|
|
10
|
+
from collections.abc import AsyncIterator
|
|
11
|
+
from typing import Any, Callable
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
class PtyLineReader:
|
|
16
|
+
def __init__(self, queue: "asyncio.Queue[bytes]") -> None:
|
|
17
|
+
self._queue = queue
|
|
18
|
+
self._buffer = b""
|
|
19
|
+
|
|
20
|
+
async def readline(self) -> bytes:
|
|
21
|
+
while True:
|
|
22
|
+
newline = self._buffer.find(b"\n")
|
|
23
|
+
if newline != -1:
|
|
24
|
+
line = self._buffer[: newline + 1]
|
|
25
|
+
self._buffer = self._buffer[newline + 1 :]
|
|
26
|
+
return line
|
|
27
|
+
|
|
28
|
+
chunk = await self._queue.get()
|
|
29
|
+
if not chunk:
|
|
30
|
+
if self._buffer:
|
|
31
|
+
line = self._buffer
|
|
32
|
+
self._buffer = b""
|
|
33
|
+
return line
|
|
34
|
+
return b""
|
|
35
|
+
self._buffer += chunk
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class CodexClient:
|
|
39
|
+
def __init__(self, target_model: str):
|
|
40
|
+
model_str = target_model.removeprefix("codex/")
|
|
41
|
+
if ":" in model_str:
|
|
42
|
+
model, level = model_str.rsplit(":", 1)
|
|
43
|
+
self.target_model = model
|
|
44
|
+
self.reasoning_level: str | None = level
|
|
45
|
+
else:
|
|
46
|
+
self.target_model = model_str
|
|
47
|
+
self.reasoning_level = None
|
|
48
|
+
self._tool_counter = 0
|
|
49
|
+
self._active_tools: dict[str, str] = {}
|
|
50
|
+
self._request_id = 0
|
|
51
|
+
|
|
52
|
+
def _next_tool_id(self, prefix: str) -> str:
|
|
53
|
+
self._tool_counter += 1
|
|
54
|
+
return f"{prefix}_{self._tool_counter}_{self._random_id()}"
|
|
55
|
+
|
|
56
|
+
def _next_request_id(self) -> int:
|
|
57
|
+
self._request_id += 1
|
|
58
|
+
return self._request_id
|
|
59
|
+
|
|
60
|
+
def _tool_marker(self, marker_type: str, payload: dict[str, Any]) -> str:
|
|
61
|
+
return f"<!--CODEX_TOOL_{marker_type}:{json.dumps(payload)}-->"
|
|
62
|
+
|
|
63
|
+
async def handle(self, payload: dict[str, Any]) -> AsyncIterator[str]:
|
|
64
|
+
prompt = self._extract_prompt(payload)
|
|
65
|
+
msg_id = f"msg_{int(time.time())}_{self._random_id()}"
|
|
66
|
+
|
|
67
|
+
yield self._sse(
|
|
68
|
+
"message_start",
|
|
69
|
+
{
|
|
70
|
+
"type": "message_start",
|
|
71
|
+
"message": {
|
|
72
|
+
"id": msg_id,
|
|
73
|
+
"type": "message",
|
|
74
|
+
"role": "assistant",
|
|
75
|
+
"content": [],
|
|
76
|
+
"model": self.target_model,
|
|
77
|
+
"stop_reason": None,
|
|
78
|
+
"stop_sequence": None,
|
|
79
|
+
"usage": {"input_tokens": 0, "output_tokens": 0},
|
|
80
|
+
},
|
|
81
|
+
},
|
|
82
|
+
)
|
|
83
|
+
yield self._sse("ping", {"type": "ping"})
|
|
84
|
+
|
|
85
|
+
text_started = False
|
|
86
|
+
text_idx = 0
|
|
87
|
+
thinking_started = False
|
|
88
|
+
thinking_idx = -1
|
|
89
|
+
cur_idx = 0
|
|
90
|
+
usage: dict[str, int] = {"input_tokens": 0, "output_tokens": 0}
|
|
91
|
+
|
|
92
|
+
master_fd, slave_fd = pty.openpty()
|
|
93
|
+
proc = await asyncio.create_subprocess_exec(
|
|
94
|
+
"codex",
|
|
95
|
+
"app-server",
|
|
96
|
+
stdin=slave_fd,
|
|
97
|
+
stdout=slave_fd,
|
|
98
|
+
stderr=slave_fd,
|
|
99
|
+
)
|
|
100
|
+
os.close(slave_fd)
|
|
101
|
+
|
|
102
|
+
output_queue: asyncio.Queue[bytes] = asyncio.Queue(maxsize=256)
|
|
103
|
+
|
|
104
|
+
async def read_master() -> None:
|
|
105
|
+
loop = asyncio.get_running_loop()
|
|
106
|
+
try:
|
|
107
|
+
while True:
|
|
108
|
+
data = await loop.run_in_executor(None, os.read, master_fd, 4096)
|
|
109
|
+
if not data:
|
|
110
|
+
break
|
|
111
|
+
await output_queue.put(data)
|
|
112
|
+
finally:
|
|
113
|
+
await output_queue.put(b"")
|
|
114
|
+
|
|
115
|
+
reader_task = asyncio.create_task(read_master())
|
|
116
|
+
line_reader = PtyLineReader(output_queue)
|
|
117
|
+
writer: Callable[[bytes], None] = lambda payload: os.write(master_fd, payload)
|
|
118
|
+
|
|
119
|
+
try:
|
|
120
|
+
init_id = self._next_request_id()
|
|
121
|
+
await self._send_request(writer, init_id, "initialize", {
|
|
122
|
+
"clientInfo": {"name": "anthropic_bridge", "version": "1.0.0"}
|
|
123
|
+
})
|
|
124
|
+
await self._read_response(line_reader, init_id)
|
|
125
|
+
await self._send_notification(writer, "initialized", {})
|
|
126
|
+
|
|
127
|
+
thread_id_req = self._next_request_id()
|
|
128
|
+
thread_params: dict[str, Any] = {
|
|
129
|
+
"approvalPolicy": "never",
|
|
130
|
+
"sandbox": "danger-full-access",
|
|
131
|
+
}
|
|
132
|
+
if self.target_model and self.target_model.lower() != "default":
|
|
133
|
+
thread_params["model"] = self.target_model
|
|
134
|
+
if self.reasoning_level:
|
|
135
|
+
thread_params["effort"] = self.reasoning_level
|
|
136
|
+
|
|
137
|
+
await self._send_request(writer, thread_id_req, "thread/start", thread_params)
|
|
138
|
+
thread_resp = await self._read_response(line_reader, thread_id_req)
|
|
139
|
+
thread_id = thread_resp.get("result", {}).get("thread", {}).get("id")
|
|
140
|
+
|
|
141
|
+
if not thread_id:
|
|
142
|
+
raise RuntimeError(f"Failed to start thread: {thread_resp}")
|
|
143
|
+
|
|
144
|
+
await self._read_until_method(line_reader, "thread/started")
|
|
145
|
+
|
|
146
|
+
turn_id_req = self._next_request_id()
|
|
147
|
+
await self._send_request(writer, turn_id_req, "turn/start", {
|
|
148
|
+
"threadId": thread_id,
|
|
149
|
+
"input": [{"type": "text", "text": prompt}]
|
|
150
|
+
})
|
|
151
|
+
|
|
152
|
+
async for msg in self._read_notifications(line_reader):
|
|
153
|
+
method = msg.get("method", "")
|
|
154
|
+
params = msg.get("params", {})
|
|
155
|
+
|
|
156
|
+
if method == "item/agentMessage/delta":
|
|
157
|
+
delta = params.get("delta", "")
|
|
158
|
+
if delta:
|
|
159
|
+
if not text_started:
|
|
160
|
+
if thinking_started:
|
|
161
|
+
yield self._sse(
|
|
162
|
+
"content_block_delta",
|
|
163
|
+
{
|
|
164
|
+
"type": "content_block_delta",
|
|
165
|
+
"index": thinking_idx,
|
|
166
|
+
"delta": {"type": "signature_delta", "signature": ""},
|
|
167
|
+
},
|
|
168
|
+
)
|
|
169
|
+
yield self._sse(
|
|
170
|
+
"content_block_stop",
|
|
171
|
+
{"type": "content_block_stop", "index": thinking_idx},
|
|
172
|
+
)
|
|
173
|
+
thinking_started = False
|
|
174
|
+
|
|
175
|
+
text_idx = cur_idx
|
|
176
|
+
cur_idx += 1
|
|
177
|
+
yield self._sse(
|
|
178
|
+
"content_block_start",
|
|
179
|
+
{
|
|
180
|
+
"type": "content_block_start",
|
|
181
|
+
"index": text_idx,
|
|
182
|
+
"content_block": {"type": "text", "text": ""},
|
|
183
|
+
},
|
|
184
|
+
)
|
|
185
|
+
text_started = True
|
|
186
|
+
|
|
187
|
+
yield self._sse(
|
|
188
|
+
"content_block_delta",
|
|
189
|
+
{
|
|
190
|
+
"type": "content_block_delta",
|
|
191
|
+
"index": text_idx,
|
|
192
|
+
"delta": {"type": "text_delta", "text": delta},
|
|
193
|
+
},
|
|
194
|
+
)
|
|
195
|
+
|
|
196
|
+
elif method == "item/reasoning/summaryTextDelta":
|
|
197
|
+
delta = params.get("delta", "")
|
|
198
|
+
if delta:
|
|
199
|
+
if not thinking_started:
|
|
200
|
+
thinking_idx = cur_idx
|
|
201
|
+
cur_idx += 1
|
|
202
|
+
yield self._sse(
|
|
203
|
+
"content_block_start",
|
|
204
|
+
{
|
|
205
|
+
"type": "content_block_start",
|
|
206
|
+
"index": thinking_idx,
|
|
207
|
+
"content_block": {
|
|
208
|
+
"type": "thinking",
|
|
209
|
+
"thinking": "",
|
|
210
|
+
"signature": "",
|
|
211
|
+
},
|
|
212
|
+
},
|
|
213
|
+
)
|
|
214
|
+
thinking_started = True
|
|
215
|
+
|
|
216
|
+
yield self._sse(
|
|
217
|
+
"content_block_delta",
|
|
218
|
+
{
|
|
219
|
+
"type": "content_block_delta",
|
|
220
|
+
"index": thinking_idx,
|
|
221
|
+
"delta": {"type": "thinking_delta", "thinking": delta},
|
|
222
|
+
},
|
|
223
|
+
)
|
|
224
|
+
|
|
225
|
+
elif method == "item/started":
|
|
226
|
+
item = params.get("item", {})
|
|
227
|
+
item_type = item.get("type", "")
|
|
228
|
+
item_id = item.get("id", "")
|
|
229
|
+
|
|
230
|
+
if item_type == "commandExecution":
|
|
231
|
+
command = item.get("command", "")
|
|
232
|
+
tool_id = self._next_tool_id("codex_cmd")
|
|
233
|
+
self._active_tools[item_id] = tool_id
|
|
234
|
+
if not text_started:
|
|
235
|
+
if thinking_started:
|
|
236
|
+
yield self._sse(
|
|
237
|
+
"content_block_delta",
|
|
238
|
+
{
|
|
239
|
+
"type": "content_block_delta",
|
|
240
|
+
"index": thinking_idx,
|
|
241
|
+
"delta": {"type": "signature_delta", "signature": ""},
|
|
242
|
+
},
|
|
243
|
+
)
|
|
244
|
+
yield self._sse(
|
|
245
|
+
"content_block_stop",
|
|
246
|
+
{"type": "content_block_stop", "index": thinking_idx},
|
|
247
|
+
)
|
|
248
|
+
thinking_started = False
|
|
249
|
+
text_idx = cur_idx
|
|
250
|
+
cur_idx += 1
|
|
251
|
+
yield self._sse(
|
|
252
|
+
"content_block_start",
|
|
253
|
+
{
|
|
254
|
+
"type": "content_block_start",
|
|
255
|
+
"index": text_idx,
|
|
256
|
+
"content_block": {"type": "text", "text": ""},
|
|
257
|
+
},
|
|
258
|
+
)
|
|
259
|
+
text_started = True
|
|
260
|
+
marker = self._tool_marker(
|
|
261
|
+
"START",
|
|
262
|
+
{"id": tool_id, "name": "CodexCommand", "input": {"command": command}},
|
|
263
|
+
)
|
|
264
|
+
yield self._sse(
|
|
265
|
+
"content_block_delta",
|
|
266
|
+
{
|
|
267
|
+
"type": "content_block_delta",
|
|
268
|
+
"index": text_idx,
|
|
269
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
270
|
+
},
|
|
271
|
+
)
|
|
272
|
+
yield self._sse("ping", {"type": "ping"})
|
|
273
|
+
|
|
274
|
+
elif item_type == "fileChange":
|
|
275
|
+
changes = item.get("changes", [])
|
|
276
|
+
for change in changes:
|
|
277
|
+
if not isinstance(change, dict):
|
|
278
|
+
continue
|
|
279
|
+
path = change.get("path", "")
|
|
280
|
+
kind = change.get("kind", "update")
|
|
281
|
+
tool_id = self._next_tool_id("codex_file")
|
|
282
|
+
self._active_tools[f"{item_id}:{path}"] = tool_id
|
|
283
|
+
tool_name = "Write" if kind == "add" else "Edit"
|
|
284
|
+
if not text_started:
|
|
285
|
+
text_idx = cur_idx
|
|
286
|
+
cur_idx += 1
|
|
287
|
+
yield self._sse(
|
|
288
|
+
"content_block_start",
|
|
289
|
+
{
|
|
290
|
+
"type": "content_block_start",
|
|
291
|
+
"index": text_idx,
|
|
292
|
+
"content_block": {"type": "text", "text": ""},
|
|
293
|
+
},
|
|
294
|
+
)
|
|
295
|
+
text_started = True
|
|
296
|
+
marker = self._tool_marker(
|
|
297
|
+
"START",
|
|
298
|
+
{"id": tool_id, "name": tool_name, "input": {"file_path": path}},
|
|
299
|
+
)
|
|
300
|
+
yield self._sse(
|
|
301
|
+
"content_block_delta",
|
|
302
|
+
{
|
|
303
|
+
"type": "content_block_delta",
|
|
304
|
+
"index": text_idx,
|
|
305
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
306
|
+
},
|
|
307
|
+
)
|
|
308
|
+
yield self._sse("ping", {"type": "ping"})
|
|
309
|
+
|
|
310
|
+
elif item_type == "mcpToolCall":
|
|
311
|
+
tool_name = item.get("tool", "mcp_tool")
|
|
312
|
+
args = item.get("arguments", {})
|
|
313
|
+
tool_id = self._next_tool_id("codex_mcp")
|
|
314
|
+
self._active_tools[item_id] = tool_id
|
|
315
|
+
if not text_started:
|
|
316
|
+
text_idx = cur_idx
|
|
317
|
+
cur_idx += 1
|
|
318
|
+
yield self._sse(
|
|
319
|
+
"content_block_start",
|
|
320
|
+
{
|
|
321
|
+
"type": "content_block_start",
|
|
322
|
+
"index": text_idx,
|
|
323
|
+
"content_block": {"type": "text", "text": ""},
|
|
324
|
+
},
|
|
325
|
+
)
|
|
326
|
+
text_started = True
|
|
327
|
+
mcp_input = dict(args) if isinstance(args, dict) else {}
|
|
328
|
+
marker = self._tool_marker(
|
|
329
|
+
"START",
|
|
330
|
+
{"id": tool_id, "name": tool_name, "input": mcp_input},
|
|
331
|
+
)
|
|
332
|
+
yield self._sse(
|
|
333
|
+
"content_block_delta",
|
|
334
|
+
{
|
|
335
|
+
"type": "content_block_delta",
|
|
336
|
+
"index": text_idx,
|
|
337
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
338
|
+
},
|
|
339
|
+
)
|
|
340
|
+
yield self._sse("ping", {"type": "ping"})
|
|
341
|
+
|
|
342
|
+
elif item_type == "webSearch":
|
|
343
|
+
query = item.get("query", "")
|
|
344
|
+
tool_id = self._next_tool_id("codex_search")
|
|
345
|
+
self._active_tools[item_id] = tool_id
|
|
346
|
+
if not text_started:
|
|
347
|
+
text_idx = cur_idx
|
|
348
|
+
cur_idx += 1
|
|
349
|
+
yield self._sse(
|
|
350
|
+
"content_block_start",
|
|
351
|
+
{
|
|
352
|
+
"type": "content_block_start",
|
|
353
|
+
"index": text_idx,
|
|
354
|
+
"content_block": {"type": "text", "text": ""},
|
|
355
|
+
},
|
|
356
|
+
)
|
|
357
|
+
text_started = True
|
|
358
|
+
marker = self._tool_marker(
|
|
359
|
+
"START",
|
|
360
|
+
{"id": tool_id, "name": "WebSearch", "input": {"query": query}},
|
|
361
|
+
)
|
|
362
|
+
yield self._sse(
|
|
363
|
+
"content_block_delta",
|
|
364
|
+
{
|
|
365
|
+
"type": "content_block_delta",
|
|
366
|
+
"index": text_idx,
|
|
367
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
368
|
+
},
|
|
369
|
+
)
|
|
370
|
+
yield self._sse("ping", {"type": "ping"})
|
|
371
|
+
|
|
372
|
+
elif method == "item/completed":
|
|
373
|
+
item = params.get("item", {})
|
|
374
|
+
item_type = item.get("type", "")
|
|
375
|
+
item_id = item.get("id", "")
|
|
376
|
+
|
|
377
|
+
if item_type == "commandExecution":
|
|
378
|
+
if item_id in self._active_tools:
|
|
379
|
+
tool_id = self._active_tools.pop(item_id)
|
|
380
|
+
output = item.get("aggregatedOutput", "")
|
|
381
|
+
exit_code = item.get("exitCode", 0)
|
|
382
|
+
if not text_started:
|
|
383
|
+
text_idx = cur_idx
|
|
384
|
+
cur_idx += 1
|
|
385
|
+
yield self._sse(
|
|
386
|
+
"content_block_start",
|
|
387
|
+
{
|
|
388
|
+
"type": "content_block_start",
|
|
389
|
+
"index": text_idx,
|
|
390
|
+
"content_block": {"type": "text", "text": ""},
|
|
391
|
+
},
|
|
392
|
+
)
|
|
393
|
+
text_started = True
|
|
394
|
+
marker = self._tool_marker(
|
|
395
|
+
"RESULT",
|
|
396
|
+
{"id": tool_id, "output": output, "exit_code": exit_code},
|
|
397
|
+
)
|
|
398
|
+
yield self._sse(
|
|
399
|
+
"content_block_delta",
|
|
400
|
+
{
|
|
401
|
+
"type": "content_block_delta",
|
|
402
|
+
"index": text_idx,
|
|
403
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
404
|
+
},
|
|
405
|
+
)
|
|
406
|
+
yield self._sse("ping", {"type": "ping"})
|
|
407
|
+
|
|
408
|
+
elif item_type == "fileChange":
|
|
409
|
+
changes = item.get("changes", [])
|
|
410
|
+
for change in changes:
|
|
411
|
+
if not isinstance(change, dict):
|
|
412
|
+
continue
|
|
413
|
+
path = change.get("path", "")
|
|
414
|
+
key = f"{item_id}:{path}"
|
|
415
|
+
if key in self._active_tools:
|
|
416
|
+
tool_id = self._active_tools.pop(key)
|
|
417
|
+
marker = self._tool_marker("RESULT", {"id": tool_id})
|
|
418
|
+
yield self._sse(
|
|
419
|
+
"content_block_delta",
|
|
420
|
+
{
|
|
421
|
+
"type": "content_block_delta",
|
|
422
|
+
"index": text_idx,
|
|
423
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
424
|
+
},
|
|
425
|
+
)
|
|
426
|
+
yield self._sse("ping", {"type": "ping"})
|
|
427
|
+
|
|
428
|
+
elif item_type == "mcpToolCall":
|
|
429
|
+
if item_id in self._active_tools:
|
|
430
|
+
tool_id = self._active_tools.pop(item_id)
|
|
431
|
+
result = item.get("result", {})
|
|
432
|
+
output = result.get("content", "") if result else ""
|
|
433
|
+
if not text_started:
|
|
434
|
+
text_idx = cur_idx
|
|
435
|
+
cur_idx += 1
|
|
436
|
+
yield self._sse(
|
|
437
|
+
"content_block_start",
|
|
438
|
+
{
|
|
439
|
+
"type": "content_block_start",
|
|
440
|
+
"index": text_idx,
|
|
441
|
+
"content_block": {"type": "text", "text": ""},
|
|
442
|
+
},
|
|
443
|
+
)
|
|
444
|
+
text_started = True
|
|
445
|
+
marker = self._tool_marker(
|
|
446
|
+
"RESULT",
|
|
447
|
+
{"id": tool_id, "output": output},
|
|
448
|
+
)
|
|
449
|
+
yield self._sse(
|
|
450
|
+
"content_block_delta",
|
|
451
|
+
{
|
|
452
|
+
"type": "content_block_delta",
|
|
453
|
+
"index": text_idx,
|
|
454
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
455
|
+
},
|
|
456
|
+
)
|
|
457
|
+
yield self._sse("ping", {"type": "ping"})
|
|
458
|
+
|
|
459
|
+
elif item_type == "webSearch":
|
|
460
|
+
if item_id in self._active_tools:
|
|
461
|
+
tool_id = self._active_tools.pop(item_id)
|
|
462
|
+
if not text_started:
|
|
463
|
+
text_idx = cur_idx
|
|
464
|
+
cur_idx += 1
|
|
465
|
+
yield self._sse(
|
|
466
|
+
"content_block_start",
|
|
467
|
+
{
|
|
468
|
+
"type": "content_block_start",
|
|
469
|
+
"index": text_idx,
|
|
470
|
+
"content_block": {"type": "text", "text": ""},
|
|
471
|
+
},
|
|
472
|
+
)
|
|
473
|
+
text_started = True
|
|
474
|
+
marker = self._tool_marker(
|
|
475
|
+
"RESULT",
|
|
476
|
+
{"id": tool_id, "output": "search completed"},
|
|
477
|
+
)
|
|
478
|
+
yield self._sse(
|
|
479
|
+
"content_block_delta",
|
|
480
|
+
{
|
|
481
|
+
"type": "content_block_delta",
|
|
482
|
+
"index": text_idx,
|
|
483
|
+
"delta": {"type": "text_delta", "text": marker},
|
|
484
|
+
},
|
|
485
|
+
)
|
|
486
|
+
yield self._sse("ping", {"type": "ping"})
|
|
487
|
+
|
|
488
|
+
elif method == "turn/completed":
|
|
489
|
+
break
|
|
490
|
+
|
|
491
|
+
elif method == "thread/tokenUsage/updated":
|
|
492
|
+
token_usage = params.get("tokenUsage", {})
|
|
493
|
+
usage["input_tokens"] = token_usage.get("inputTokens", 0)
|
|
494
|
+
usage["output_tokens"] = token_usage.get("outputTokens", 0)
|
|
495
|
+
|
|
496
|
+
elif method == "error":
|
|
497
|
+
error_msg = params.get("error", {}).get("message", "Unknown error")
|
|
498
|
+
if not text_started:
|
|
499
|
+
text_idx = cur_idx
|
|
500
|
+
cur_idx += 1
|
|
501
|
+
yield self._sse(
|
|
502
|
+
"content_block_start",
|
|
503
|
+
{
|
|
504
|
+
"type": "content_block_start",
|
|
505
|
+
"index": text_idx,
|
|
506
|
+
"content_block": {"type": "text", "text": ""},
|
|
507
|
+
},
|
|
508
|
+
)
|
|
509
|
+
text_started = True
|
|
510
|
+
|
|
511
|
+
yield self._sse(
|
|
512
|
+
"content_block_delta",
|
|
513
|
+
{
|
|
514
|
+
"type": "content_block_delta",
|
|
515
|
+
"index": text_idx,
|
|
516
|
+
"delta": {
|
|
517
|
+
"type": "text_delta",
|
|
518
|
+
"text": f"Error: {error_msg}",
|
|
519
|
+
},
|
|
520
|
+
},
|
|
521
|
+
)
|
|
522
|
+
|
|
523
|
+
finally:
|
|
524
|
+
reader_task.cancel()
|
|
525
|
+
with contextlib.suppress(asyncio.CancelledError):
|
|
526
|
+
await reader_task
|
|
527
|
+
with contextlib.suppress(OSError):
|
|
528
|
+
os.close(master_fd)
|
|
529
|
+
if proc.returncode is None:
|
|
530
|
+
proc.terminate()
|
|
531
|
+
await proc.wait()
|
|
532
|
+
|
|
533
|
+
if thinking_started:
|
|
534
|
+
yield self._sse(
|
|
535
|
+
"content_block_delta",
|
|
536
|
+
{
|
|
537
|
+
"type": "content_block_delta",
|
|
538
|
+
"index": thinking_idx,
|
|
539
|
+
"delta": {"type": "signature_delta", "signature": ""},
|
|
540
|
+
},
|
|
541
|
+
)
|
|
542
|
+
yield self._sse(
|
|
543
|
+
"content_block_stop",
|
|
544
|
+
{"type": "content_block_stop", "index": thinking_idx},
|
|
545
|
+
)
|
|
546
|
+
|
|
547
|
+
if text_started:
|
|
548
|
+
yield self._sse(
|
|
549
|
+
"content_block_stop", {"type": "content_block_stop", "index": text_idx}
|
|
550
|
+
)
|
|
551
|
+
|
|
552
|
+
yield self._sse(
|
|
553
|
+
"message_delta",
|
|
554
|
+
{
|
|
555
|
+
"type": "message_delta",
|
|
556
|
+
"delta": {"stop_reason": "end_turn", "stop_sequence": None},
|
|
557
|
+
"usage": usage,
|
|
558
|
+
},
|
|
559
|
+
)
|
|
560
|
+
yield self._sse("message_stop", {"type": "message_stop"})
|
|
561
|
+
yield "data: [DONE]\n\n"
|
|
562
|
+
|
|
563
|
+
async def _send_request(
|
|
564
|
+
self,
|
|
565
|
+
write_fn: Callable[[bytes], None],
|
|
566
|
+
req_id: int,
|
|
567
|
+
method: str,
|
|
568
|
+
params: dict[str, Any],
|
|
569
|
+
) -> None:
|
|
570
|
+
msg = {"method": method, "id": req_id, "params": params}
|
|
571
|
+
line = json.dumps(msg) + "\n"
|
|
572
|
+
await asyncio.to_thread(write_fn, line.encode())
|
|
573
|
+
|
|
574
|
+
async def _send_notification(
|
|
575
|
+
self,
|
|
576
|
+
write_fn: Callable[[bytes], None],
|
|
577
|
+
method: str,
|
|
578
|
+
params: dict[str, Any],
|
|
579
|
+
) -> None:
|
|
580
|
+
msg = {"method": method, "params": params}
|
|
581
|
+
line = json.dumps(msg) + "\n"
|
|
582
|
+
await asyncio.to_thread(write_fn, line.encode())
|
|
583
|
+
|
|
584
|
+
async def _read_response(
|
|
585
|
+
self,
|
|
586
|
+
reader: PtyLineReader,
|
|
587
|
+
expected_id: int,
|
|
588
|
+
) -> dict[str, Any]:
|
|
589
|
+
while True:
|
|
590
|
+
line = await reader.readline()
|
|
591
|
+
if not line:
|
|
592
|
+
return {}
|
|
593
|
+
try:
|
|
594
|
+
msg: dict[str, Any] = json.loads(line.decode())
|
|
595
|
+
if msg.get("id") == expected_id:
|
|
596
|
+
return msg
|
|
597
|
+
except json.JSONDecodeError:
|
|
598
|
+
continue
|
|
599
|
+
|
|
600
|
+
async def _read_until_method(
|
|
601
|
+
self,
|
|
602
|
+
reader: PtyLineReader,
|
|
603
|
+
target_method: str,
|
|
604
|
+
) -> dict[str, Any]:
|
|
605
|
+
while True:
|
|
606
|
+
line = await reader.readline()
|
|
607
|
+
if not line:
|
|
608
|
+
return {}
|
|
609
|
+
try:
|
|
610
|
+
msg: dict[str, Any] = json.loads(line.decode())
|
|
611
|
+
if msg.get("method") == target_method:
|
|
612
|
+
return msg
|
|
613
|
+
except json.JSONDecodeError:
|
|
614
|
+
continue
|
|
615
|
+
|
|
616
|
+
async def _read_notifications(
|
|
617
|
+
self,
|
|
618
|
+
reader: PtyLineReader,
|
|
619
|
+
) -> AsyncIterator[dict[str, Any]]:
|
|
620
|
+
while True:
|
|
621
|
+
try:
|
|
622
|
+
line = await asyncio.wait_for(reader.readline(), timeout=300)
|
|
623
|
+
if not line:
|
|
624
|
+
break
|
|
625
|
+
msg = json.loads(line.decode())
|
|
626
|
+
yield msg
|
|
627
|
+
if msg.get("method") == "turn/completed":
|
|
628
|
+
break
|
|
629
|
+
except asyncio.TimeoutError:
|
|
630
|
+
logger.warning("Timeout waiting for Codex response")
|
|
631
|
+
break
|
|
632
|
+
except json.JSONDecodeError as e:
|
|
633
|
+
logger.warning("Failed to parse Codex JSON: %s", e)
|
|
634
|
+
continue
|
|
635
|
+
|
|
636
|
+
def _extract_prompt(self, payload: dict[str, Any]) -> str:
|
|
637
|
+
messages = payload.get("messages", [])
|
|
638
|
+
parts: list[str] = []
|
|
639
|
+
|
|
640
|
+
for msg in messages:
|
|
641
|
+
role = msg.get("role", "user")
|
|
642
|
+
content = msg.get("content", "")
|
|
643
|
+
|
|
644
|
+
if isinstance(content, list):
|
|
645
|
+
text_parts = []
|
|
646
|
+
for item in content:
|
|
647
|
+
if isinstance(item, dict) and item.get("type") == "text":
|
|
648
|
+
text_parts.append(item.get("text", ""))
|
|
649
|
+
elif isinstance(item, str):
|
|
650
|
+
text_parts.append(item)
|
|
651
|
+
content = "\n".join(text_parts)
|
|
652
|
+
|
|
653
|
+
if content:
|
|
654
|
+
parts.append(f"{role.capitalize()}: {content}")
|
|
655
|
+
|
|
656
|
+
return "\n\n".join(parts)
|
|
657
|
+
|
|
658
|
+
def _sse(self, event: str, data: dict[str, Any]) -> str:
|
|
659
|
+
return f"event: {event}\ndata: {json.dumps(data)}\n\n"
|
|
660
|
+
|
|
661
|
+
def _random_id(self) -> str:
|
|
662
|
+
return "".join(random.choices(string.ascii_lowercase + string.digits, k=12))
|
|
@@ -1,317 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
import json
|
|
3
|
-
import random
|
|
4
|
-
import string
|
|
5
|
-
import time
|
|
6
|
-
from collections.abc import AsyncIterator
|
|
7
|
-
from typing import Any
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
class CodexClient:
|
|
11
|
-
def __init__(self, target_model: str):
|
|
12
|
-
model_str = target_model.removeprefix("codex/")
|
|
13
|
-
# Parse reasoning level suffix (e.g., "gpt-5.2-codex:high")
|
|
14
|
-
if ":" in model_str:
|
|
15
|
-
model, level = model_str.rsplit(":", 1)
|
|
16
|
-
self.target_model = model
|
|
17
|
-
self.reasoning_level: str | None = level
|
|
18
|
-
else:
|
|
19
|
-
self.target_model = model_str
|
|
20
|
-
self.reasoning_level = None
|
|
21
|
-
|
|
22
|
-
async def handle(self, payload: dict[str, Any]) -> AsyncIterator[str]:
    """Stream an Anthropic-style SSE response by shelling out to the Codex CLI.

    Runs ``codex exec --json`` with the flattened prompt, translates the
    CLI's JSONL events (``reasoning`` / ``agent_message`` items, turn usage,
    errors) into Anthropic ``message_start`` / ``content_block_*`` /
    ``message_delta`` / ``message_stop`` SSE frames, and always terminates
    the stream with ``data: [DONE]``.

    Args:
        payload: Anthropic Messages-API request body (``messages``/``system``).

    Yields:
        SSE-formatted frames (``event: ...\\ndata: ...\\n\\n`` strings).
    """
    prompt = self._extract_prompt(payload)
    msg_id = f"msg_{int(time.time())}_{self._random_id()}"

    yield self._sse(
        "message_start",
        {
            "type": "message_start",
            "message": {
                "id": msg_id,
                "type": "message",
                "role": "assistant",
                "content": [],
                "model": self.target_model,
                "stop_reason": None,
                "stop_sequence": None,
                "usage": {"input_tokens": 0, "output_tokens": 0},
            },
        },
    )
    yield self._sse("ping", {"type": "ping"})

    # Streaming state: block indices are assigned lazily as blocks open.
    text_started = False
    text_idx = 0
    thinking_started = False
    thinking_idx = -1
    cur_idx = 0
    usage: dict[str, int] = {"input_tokens": 0, "output_tokens": 0}

    def _close_thinking() -> list[str]:
        # Finish an open thinking block: empty signature delta, then stop.
        nonlocal thinking_started
        if not thinking_started:
            return []
        thinking_started = False
        return [
            self._sse(
                "content_block_delta",
                {
                    "type": "content_block_delta",
                    "index": thinking_idx,
                    "delta": {"type": "signature_delta", "signature": ""},
                },
            ),
            self._sse(
                "content_block_stop",
                {"type": "content_block_stop", "index": thinking_idx},
            ),
        ]

    def _open_text() -> list[str]:
        # Open the text content block exactly once.
        nonlocal text_started, text_idx, cur_idx
        if text_started:
            return []
        text_idx = cur_idx
        cur_idx += 1
        text_started = True
        return [
            self._sse(
                "content_block_start",
                {
                    "type": "content_block_start",
                    "index": text_idx,
                    "content_block": {"type": "text", "text": ""},
                },
            )
        ]

    def _text_delta(chunk: str) -> str:
        return self._sse(
            "content_block_delta",
            {
                "type": "content_block_delta",
                "index": text_idx,
                "delta": {"type": "text_delta", "text": chunk},
            },
        )

    cmd = [
        "codex",
        "exec",
        "--json",
        "--skip-git-repo-check",
        "--dangerously-bypass-approvals-and-sandbox",
        "--search",
        "-m",
        self.target_model,
    ]
    if self.reasoning_level:
        cmd.extend(["-c", f"reasoning_effort={self.reasoning_level}"])
    cmd.append(prompt)

    # stderr is discarded rather than PIPEd: nothing ever drained the
    # stderr pipe before, so a chatty CLI could fill the OS pipe buffer
    # and deadlock the subprocess.
    proc = await asyncio.create_subprocess_exec(
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.DEVNULL,
    )

    try:
        async for line in self._read_lines(proc.stdout):
            if not line.strip():
                continue

            try:
                event = json.loads(line)
            except json.JSONDecodeError:
                # Non-JSON noise on stdout is skipped, not fatal.
                continue

            event_type = event.get("type", "")

            if event_type == "item.started":
                item = event.get("item", {})
                if item.get("type", "") == "reasoning" and not thinking_started:
                    thinking_idx = cur_idx
                    cur_idx += 1
                    yield self._sse(
                        "content_block_start",
                        {
                            "type": "content_block_start",
                            "index": thinking_idx,
                            "content_block": {
                                "type": "thinking",
                                "thinking": "",
                                "signature": "",
                            },
                        },
                    )
                    thinking_started = True

            elif event_type in ("item.updated", "item.completed"):
                item = event.get("item", {})
                item_type = item.get("type", "")
                text = item.get("text", "")

                # Reasoning deltas are only forwarded from item.updated;
                # a completed reasoning item carries nothing new for us.
                if (
                    event_type == "item.updated"
                    and item_type == "reasoning"
                    and thinking_started
                    and text
                ):
                    yield self._sse(
                        "content_block_delta",
                        {
                            "type": "content_block_delta",
                            "index": thinking_idx,
                            "delta": {"type": "thinking_delta", "thinking": text},
                        },
                    )
                elif item_type == "agent_message" and text:
                    for frame in _close_thinking():
                        yield frame
                    for frame in _open_text():
                        yield frame
                    yield _text_delta(text)

            elif event_type == "turn.completed":
                turn_usage = event.get("usage", {})
                usage["input_tokens"] = turn_usage.get("input_tokens", 0)
                usage["output_tokens"] = turn_usage.get("output_tokens", 0)

            elif event_type in ("error", "turn.failed"):
                # "error" may be present but None; guard before .get().
                error_msg = event.get("message") or (
                    event.get("error") or {}
                ).get("message", "Unknown error")
                for frame in _open_text():
                    yield frame
                yield _text_delta(f"Error: {error_msg}")

    finally:
        # Ensure the CLI process never outlives the stream.
        if proc.returncode is None:
            proc.terminate()
            await proc.wait()

    for frame in _close_thinking():
        yield frame

    if text_started:
        yield self._sse(
            "content_block_stop", {"type": "content_block_stop", "index": text_idx}
        )

    yield self._sse(
        "message_delta",
        {
            "type": "message_delta",
            "delta": {"stop_reason": "end_turn", "stop_sequence": None},
            "usage": usage,
        },
    )
    yield self._sse("message_stop", {"type": "message_stop"})
    yield "data: [DONE]\n\n"
def _extract_prompt(self, payload: dict[str, Any]) -> str:
    """Flatten an Anthropic messages payload into a single prompt string.

    The system prompt (string or list of text blocks) becomes a leading
    "System: ..." section; each message contributes a "Role: content"
    section. Non-text items inside list-valued content are skipped.
    """
    system = payload.get("system", "")
    if isinstance(system, list):
        pieces: list[str] = []
        for entry in system:
            if isinstance(entry, dict):
                pieces.append(entry.get("text", ""))
            else:
                pieces.append(str(entry))
        system = "\n\n".join(pieces)

    sections: list[str] = []
    if system:
        sections.append(f"System: {system}")

    for message in payload.get("messages", []):
        role = message.get("role", "user")
        content = message.get("content", "")

        if isinstance(content, list):
            collected: list[str] = []
            for entry in content:
                if isinstance(entry, str):
                    collected.append(entry)
                elif isinstance(entry, dict) and entry.get("type") == "text":
                    collected.append(entry.get("text", ""))
            content = "\n".join(collected)

        if content:
            sections.append(f"{role.capitalize()}: {content}")

    return "\n\n".join(sections)
async def _read_lines(
    self, stream: asyncio.StreamReader | None
) -> AsyncIterator[str]:
    """Yield decoded lines from *stream* until EOF; no-op when it is None."""
    if stream is None:
        return
    # readline() returns b"" at EOF, which ends the loop.
    while raw := await stream.readline():
        yield raw.decode("utf-8", errors="replace")
def _sse(self, event: str, data: dict[str, Any]) -> str:
    """Format one server-sent-event frame for *event* with JSON *data*."""
    body = json.dumps(data)
    return "event: " + event + "\ndata: " + body + "\n\n"
def _random_id(self) -> str:
    """Return a 12-character lowercase alphanumeric message-id suffix."""
    alphabet = string.ascii_lowercase + string.digits
    return "".join(random.choice(alphabet) for _ in range(12))
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|