local-openai2anthropic 0.3.1__tar.gz → 0.3.3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/PKG-INFO +1 -1
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/pyproject.toml +1 -1
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/converter.py +0 -4
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/router.py +287 -156
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/.github/workflows/publish.yml +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/.gitignore +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/LICENSE +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/README.md +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/README_zh.md +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/examples/basic_chat.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/examples/streaming.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/examples/thinking_mode.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/examples/tool_calling.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/examples/vision.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/examples/web_search.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/__init__.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/__main__.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/config.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/daemon.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/daemon_runner.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/main.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/openai_types.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/protocol.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/server_tools/__init__.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/server_tools/base.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/server_tools/web_search.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/tavily_client.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/tests/__init__.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/tests/test_converter.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/tests/test_integration.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/tests/test_router.py +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/tests/test_upstream.sh +0 -0
- {local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/uv.lock +0 -0
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-openai2anthropic
-Version: 0.3.1
+Version: 0.3.3
 Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
 Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
 Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/converter.py
RENAMED

@@ -404,10 +404,6 @@ def convert_openai_to_anthropic(
     # Convert tool calls
     if message.tool_calls:
         for tc in message.tool_calls:
-            # Handle case where function might be None
-            if not tc.function:
-                continue
-
             tool_input: dict[str, Any] = {}
             try:
                 tool_input = json.loads(tc.function.arguments)
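With the `tc.function` guard gone, the converter now assumes every tool call carries a `function` payload; the surrounding `try` parses its `arguments` string. A minimal standalone sketch of that parse-with-fallback pattern (the payload and the `except` clause are illustrative, not taken from the hunk):

```python
import json
from typing import Any

# Invented, deliberately malformed arguments string.
raw_arguments = '{"city": "Paris"'

tool_input: dict[str, Any] = {}
try:
    tool_input = json.loads(raw_arguments)
except json.JSONDecodeError:
    tool_input = {}  # fall back to an empty input dict

print(tool_input)  # -> {}
```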
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/router.py
RENAMED

@@ -8,7 +8,7 @@ import logging
 import secrets
 import string
 from http import HTTPStatus
-from typing import Any, AsyncGenerator
+from typing import Any, AsyncGenerator, cast
 
 import httpx
 from fastapi import APIRouter, Depends, HTTPException, Request
@@ -51,6 +51,87 @@ def _generate_server_tool_id() -> str:
     return f"srvtoolu_{random_part}"
 
 
+def _normalize_usage(usage: dict[str, Any] | None) -> dict[str, Any] | None:
+    if not isinstance(usage, dict):
+        return usage
+    allowed_keys = {
+        "input_tokens",
+        "output_tokens",
+        "cache_creation_input_tokens",
+        "cache_read_input_tokens",
+        "server_tool_use",
+    }
+    normalized = {k: v for k, v in usage.items() if k in allowed_keys}
+    return normalized or None
+
+
+def _count_tokens(text: str) -> int:
+    try:
+        import tiktoken  # type: ignore[import-not-found]
+    except Exception:
+        return 0
+
+    encoding = tiktoken.get_encoding("cl100k_base")
+    return len(encoding.encode(text))
+
+
+def _chunk_text(text: str, chunk_size: int = 200) -> list[str]:
+    if not text:
+        return []
+    return [text[i : i + chunk_size] for i in range(0, len(text), chunk_size)]
+
+
+def _estimate_input_tokens(openai_params: dict[str, Any]) -> int:
+    try:
+        import tiktoken  # type: ignore[import-not-found]
+    except Exception:
+        return 0
+
+    encoding = tiktoken.get_encoding("cl100k_base")
+    total_tokens = 0
+
+    system = openai_params.get("system")
+    if isinstance(system, str):
+        total_tokens += len(encoding.encode(system))
+
+    messages = openai_params.get("messages", [])
+    if isinstance(messages, list):
+        for msg in messages:
+            if not isinstance(msg, dict):
+                continue
+            content = msg.get("content", "")
+            if isinstance(content, str):
+                total_tokens += len(encoding.encode(content))
+            elif isinstance(content, list):
+                for block in content:
+                    if not isinstance(block, dict):
+                        total_tokens += len(encoding.encode(str(block)))
+                        continue
+                    block_type = block.get("type")
+                    if block_type == "text":
+                        total_tokens += len(encoding.encode(block.get("text", "")))
+                    elif block_type == "image_url":
+                        total_tokens += 85
+
+            tool_calls = msg.get("tool_calls")
+            if isinstance(tool_calls, list) and tool_calls:
+                total_tokens += len(encoding.encode(json.dumps(tool_calls)))
+
+    tools = openai_params.get("tools")
+    if isinstance(tools, list) and tools:
+        total_tokens += len(encoding.encode(json.dumps(tools)))
+
+    tool_choice = openai_params.get("tool_choice")
+    if tool_choice is not None:
+        total_tokens += len(encoding.encode(json.dumps(tool_choice)))
+
+    response_format = openai_params.get("response_format")
+    if response_format is not None:
+        total_tokens += len(encoding.encode(json.dumps(response_format)))
+
+    return total_tokens
+
+
 async def _stream_response(
     client: httpx.AsyncClient,
     url: str,
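For readers skimming the new helpers: `_normalize_usage` strips non-Anthropic usage keys so only Anthropic-shaped ones survive, and `_chunk_text` splits long text into fixed-size slices for incremental SSE deltas. A self-contained sketch of both contracts (the sample values are invented):

```python
# Mirrors _normalize_usage: keep only Anthropic usage keys, else None.
usage = {"input_tokens": 12, "output_tokens": 34, "prompt_tokens": 12}
allowed = {
    "input_tokens", "output_tokens",
    "cache_creation_input_tokens", "cache_read_input_tokens",
    "server_tool_use",
}
normalized = {k: v for k, v in usage.items() if k in allowed} or None
assert normalized == {"input_tokens": 12, "output_tokens": 34}

# Mirrors _chunk_text: fixed-size slices; empty input yields no chunks.
text = "x" * 450
chunks = [text[i : i + 200] for i in range(0, len(text), 200)]
assert [len(c) for c in chunks] == [200, 200, 50]
```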
@@ -61,23 +142,23 @@ async def _stream_response(
     """
     Stream response from OpenAI and convert to Anthropic format.
     """
-    # Log streaming request start
-    logger.info(f"[OpenAI Stream] Starting streaming request to {url}")
-    logger.info(f"[OpenAI Stream] Request model: {json_data.get('model', 'unknown')}")
-
     try:
         async with client.stream(
             "POST", url, headers=headers, json=json_data
         ) as response:
             if response.status_code != 200:
                 error_body = await response.aread()
+                error_text = error_body.decode("utf-8", errors="replace").strip()
                 try:
-                    error_json = json.loads(error_body)
-                    error_msg = error_json.get("error", {}).get(
-                        "message", error_body.decode()
-                    )
+                    error_json = json.loads(error_text) if error_text else {}
+                    error_msg = error_json.get("error", {}).get("message") or error_text
                 except json.JSONDecodeError:
-                    error_msg = error_body.decode()
+                    error_msg = error_text
+                if not error_msg:
+                    error_msg = (
+                        response.reason_phrase
+                        or f"Upstream API error ({response.status_code})"
+                    )
 
                 error_event = AnthropicErrorResponse(
                     error=AnthropicError(type="api_error", message=error_msg)
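The new error path first tries to pull `error.message` out of a JSON body, then falls back to the raw text, the HTTP reason phrase, and finally a generic status-code message. A self-contained sketch of that chain (the function name and inputs are invented):

```python
import json

def extract_error(error_text: str, reason_phrase: str, status_code: int) -> str:
    try:
        error_json = json.loads(error_text) if error_text else {}
        error_msg = error_json.get("error", {}).get("message") or error_text
    except json.JSONDecodeError:
        error_msg = error_text
    if not error_msg:
        error_msg = reason_phrase or f"Upstream API error ({status_code})"
    return error_msg

assert extract_error('{"error": {"message": "boom"}}', "Bad Gateway", 502) == "boom"
assert extract_error("not json", "Bad Gateway", 502) == "not json"
assert extract_error("", "", 502) == "Upstream API error (502)"
```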
@@ -91,10 +172,13 @@ async def _stream_response(
             content_block_started = False
             content_block_index = 0
             current_block_type = None  # 'thinking', 'text', or 'tool_use'
+            current_tool_call_index = None
+            tool_call_buffers: dict[int, str] = {}
             finish_reason = None
-            input_tokens = 0
+            input_tokens = _estimate_input_tokens(json_data)
             output_tokens = 0
             message_id = None
+            sent_message_delta = False
 
             async for line in response.aiter_lines():
                 if not line.startswith("data: "):
@@ -102,6 +186,30 @@ async def _stream_response(
 
                 data = line[6:]
                 if data == "[DONE]":
+                    if not sent_message_delta:
+                        stop_reason_map = {
+                            "stop": "end_turn",
+                            "length": "max_tokens",
+                            "tool_calls": "tool_use",
+                        }
+                        delta_event = {
+                            "type": "message_delta",
+                            "delta": {
+                                "stop_reason": stop_reason_map.get(
+                                    finish_reason or "stop", "end_turn"
+                                )
+                            },
+                            "usage": {
+                                "input_tokens": input_tokens,
+                                "output_tokens": output_tokens,
+                                "cache_creation_input_tokens": None,
+                                "cache_read_input_tokens": None,
+                            },
+                        }
+                        logger.debug(
+                            f"[Anthropic Stream Event] message_delta: {json.dumps(delta_event, ensure_ascii=False)}"
+                        )
+                        yield f"event: message_delta\ndata: {json.dumps(delta_event)}\n\n"
                     break
 
                 try:
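This guards against upstreams that send `[DONE]` without a final usage chunk: the proxy now synthesizes the closing `message_delta` itself. A sketch of the SSE frame it emits (token counts invented):

```python
import json

stop_reason_map = {"stop": "end_turn", "length": "max_tokens", "tool_calls": "tool_use"}
finish_reason = "tool_calls"

delta_event = {
    "type": "message_delta",
    "delta": {"stop_reason": stop_reason_map.get(finish_reason or "stop", "end_turn")},
    "usage": {
        "input_tokens": 42,   # estimated via _estimate_input_tokens when the upstream omits usage
        "output_tokens": 7,
        "cache_creation_input_tokens": None,
        "cache_read_input_tokens": None,
    },
}
print(f"event: message_delta\ndata: {json.dumps(delta_event)}\n")
```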
@@ -116,7 +224,7 @@ async def _stream_response(
                 if first_chunk:
                     message_id = chunk.get("id", "")
                     usage = chunk.get("usage") or {}
-                    input_tokens = usage.get("prompt_tokens", 0)
+                    input_tokens = usage.get("prompt_tokens", input_tokens)
 
                     start_event = {
                         "type": "message_start",
@@ -147,6 +255,8 @@ async def _stream_response(
                 if not chunk.get("choices"):
                     usage = chunk.get("usage") or {}
                     if usage:
+                        input_tokens = usage.get("prompt_tokens", input_tokens)
+                        output_tokens = usage.get("completion_tokens", output_tokens)
                         if content_block_started:
                             yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': content_block_index})}\n\n"
                             content_block_started = False
@@ -164,7 +274,9 @@ async def _stream_response(
                                 )
                             },
                             "usage": {
-                                "input_tokens": usage.get("prompt_tokens", 0),
+                                "input_tokens": usage.get(
+                                    "prompt_tokens", input_tokens
+                                ),
                                 "output_tokens": usage.get("completion_tokens", 0),
                                 "cache_creation_input_tokens": None,
                                 "cache_read_input_tokens": None,
@@ -174,6 +286,7 @@ async def _stream_response(
                             f"[Anthropic Stream Event] message_delta: {json.dumps(delta_event, ensure_ascii=False)}"
                         )
                         yield f"event: message_delta\ndata: {json.dumps(delta_event)}\n\n"
+                        sent_message_delta = True
                     continue
 
                 choice = chunk["choices"][0]
@@ -183,19 +296,6 @@ async def _stream_response(
                 if choice.get("finish_reason"):
                     finish_reason = choice["finish_reason"]
 
-                    # When finish_reason is tool_calls, we need to close the current block
-                    # and prepare to send message_delta
-                    if finish_reason == "tool_calls" and content_block_started:
-                        stop_block = {
-                            "type": "content_block_stop",
-                            "index": content_block_index,
-                        }
-                        logger.debug(
-                            f"[Anthropic Stream Event] content_block_stop (tool_calls): {json.dumps(stop_block, ensure_ascii=False)}"
-                        )
-                        yield f"event: content_block_stop\ndata: {json.dumps(stop_block)}\n\n"
-                        content_block_started = False
-
                 # Handle reasoning content (thinking)
                 if delta.get("reasoning_content"):
                     reasoning = delta["reasoning_content"]
@@ -215,7 +315,11 @@ async def _stream_response(
                         start_block = {
                             "type": "content_block_start",
                             "index": content_block_index,
-                            "content_block": {"type": "thinking", "thinking": ""},
+                            "content_block": {
+                                "type": "thinking",
+                                "thinking": "",
+                                "signature": "",
+                            },
                         }
                         logger.debug(
                             f"[Anthropic Stream Event] content_block_start (thinking): {json.dumps(start_block, ensure_ascii=False)}"
@@ -224,12 +328,13 @@ async def _stream_response(
                         content_block_started = True
                         current_block_type = "thinking"
 
-                    delta_block = {
-                        "type": "content_block_delta",
-                        "index": content_block_index,
-                        "delta": {"type": "thinking_delta", "thinking": reasoning},
-                    }
-                    yield f"event: content_block_delta\ndata: {json.dumps(delta_block)}\n\n"
+                    for chunk in _chunk_text(reasoning):
+                        delta_block = {
+                            "type": "content_block_delta",
+                            "index": content_block_index,
+                            "delta": {"type": "thinking_delta", "thinking": chunk},
+                        }
+                        yield f"event: content_block_delta\ndata: {json.dumps(delta_block)}\n\n"
                     continue
 
                 # Handle content
@@ -258,6 +363,7 @@ async def _stream_response(
                     content_block_started = True
                     current_block_type = "text"
 
+                output_tokens += _count_tokens(delta["content"])
                 delta_block = {
                     "type": "content_block_delta",
                     "index": content_block_index,
@@ -266,33 +372,50 @@ async def _stream_response(
                 yield f"event: content_block_delta\ndata: {json.dumps(delta_block)}\n\n"
 
                 # Handle tool calls
-                [27 lines of the previous tool-call handling: not preserved in the diff source]
+                if delta.get("tool_calls"):
+                    for tool_call in delta["tool_calls"]:
+                        tool_call_idx = tool_call.get("index", 0)
+
+                        if tool_call.get("id"):
+                            if content_block_started and (
+                                current_block_type != "tool_use"
+                                or current_tool_call_index != tool_call_idx
+                            ):
+                                yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': content_block_index})}\n\n"
+                                content_block_started = False
+                                content_block_index += 1
+
+                            if not content_block_started:
+                                func = tool_call.get("function") or {}
+                                yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': content_block_index, 'content_block': {'type': 'tool_use', 'id': tool_call['id'], 'name': func.get('name', ''), 'input': {}}})}\n\n"
+                                content_block_started = True
+                                current_block_type = "tool_use"
+                                current_tool_call_index = tool_call_idx
+                                tool_call_buffers.setdefault(tool_call_idx, "")
+
+                        if (tool_call.get("function") or {}).get("arguments"):
+                            args = (tool_call.get("function") or {}).get(
+                                "arguments", ""
+                            )
+                            if (
+                                not content_block_started
+                                or current_block_type != "tool_use"
+                                or current_tool_call_index != tool_call_idx
+                            ):
+                                if content_block_started:
+                                    yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': content_block_index})}\n\n"
+                                content_block_index += 1
+                                func = tool_call.get("function") or {}
+                                tool_id = tool_call.get("id", "")
+                                yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': content_block_index, 'content_block': {'type': 'tool_use', 'id': tool_id, 'name': func.get('name', ''), 'input': {}}})}\n\n"
+                                content_block_started = True
+                                current_block_type = "tool_use"
+                                current_tool_call_index = tool_call_idx
+                                tool_call_buffers.setdefault(tool_call_idx, "")
+                            tool_call_buffers[tool_call_idx] = (
+                                tool_call_buffers.get(tool_call_idx, "") + args
+                            )
+                            yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': content_block_index, 'delta': {'type': 'input_json_delta', 'partial_json': args}})}\n\n"
 
             # Close final content block
             if content_block_started:
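The rewritten branch keys everything off the OpenAI tool-call `index`, buffering argument fragments per call and opening a fresh `tool_use` block whenever the index or block type changes. A simplified, self-contained simulation with invented deltas (happy path only, no block-switching edge cases):

```python
import json

deltas = [
    {"index": 0, "id": "call_abc", "function": {"name": "get_weather", "arguments": ""}},
    {"index": 0, "function": {"arguments": '{"city": '}},
    {"index": 0, "function": {"arguments": '"Paris"}'}},
]

buffers: dict[int, str] = {}
for tc in deltas:
    idx = tc.get("index", 0)
    if tc.get("id"):  # first fragment opens the tool_use block
        func = tc.get("function") or {}
        print("content_block_start:",
              json.dumps({"type": "tool_use", "id": tc["id"],
                          "name": func.get("name", ""), "input": {}}))
        buffers.setdefault(idx, "")
    args = (tc.get("function") or {}).get("arguments", "")
    if args:
        buffers[idx] = buffers.get(idx, "") + args
        print("content_block_delta:",
              json.dumps({"type": "input_json_delta", "partial_json": args}))

assert json.loads(buffers[0]) == {"city": "Paris"}
```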
@@ -305,13 +428,6 @@ async def _stream_response(
                 )
                 yield f"event: content_block_stop\ndata: {json.dumps(stop_block)}\n\n"
 
-            # Log stream summary before ending
-            logger.info(
-                f"[OpenAI Stream] Stream ended - message_id={message_id}, "
-                f"finish_reason={finish_reason}, input_tokens={input_tokens}, "
-                f"output_tokens={output_tokens}, content_blocks={content_block_index + 1}"
-            )
-
             # Message stop
             stop_event = {"type": "message_stop"}
             logger.debug(
@@ -337,7 +453,7 @@ async def _convert_result_to_stream(
     """Convert a JSONResponse to streaming SSE format."""
     import time
 
-    body = json.loads(result.body)
+    body = json.loads(bytes(result.body).decode("utf-8"))
     message_id = body.get("id", f"msg_{int(time.time() * 1000)}")
     content = body.get("content", [])
     usage = body.get("usage", {})
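`Response.body` can be `bytes` or a `memoryview` depending on the Starlette version and how the response was built, and `json.loads` does not accept a `memoryview`; `bytes(...)` normalizes both cases. A quick illustration:

```python
import json

raw = memoryview(b'{"id": "msg_1"}')  # stand-in for result.body

# json.loads(raw) would raise TypeError; bytes(raw) works for both types.
body = json.loads(bytes(raw).decode("utf-8"))
assert body["id"] == "msg_1"
```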
@@ -384,6 +500,10 @@ async def _convert_result_to_stream(
 
         elif block_type == "tool_use":
             yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': i, 'content_block': {'type': 'tool_use', 'id': block.get('id', ''), 'name': block.get('name', ''), 'input': block.get('input', {})}})}\n\n"
+            tool_input = block.get("input", {})
+            if tool_input:
+                input_json = json.dumps(tool_input, ensure_ascii=False)
+                yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': i, 'delta': {'type': 'input_json_delta', 'partial_json': input_json}})}\n\n"
             yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': i})}\n\n"
 
         elif block_type == "server_tool_use":
@@ -393,17 +513,14 @@ async def _convert_result_to_stream(
 
         elif block_type == "web_search_tool_result":
             # Stream the tool result as its own content block.
-            # Some clients expect `results`, others expect `content`; include both when possible.
             tool_result_block = dict(block)
-            if "content" not in tool_result_block and "results" in tool_result_block:
-                tool_result_block["content"] = tool_result_block["results"]
-
             yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': i, 'content_block': tool_result_block})}\n\n"
             yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': i})}\n\n"
 
         elif block_type == "thinking":
             # Handle thinking blocks (BetaThinkingBlock)
-            yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': i, 'content_block': {'type': 'thinking', 'thinking': ''}})}\n\n"
+            signature = block.get("signature", "")
+            yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': i, 'content_block': {'type': 'thinking', 'thinking': '', 'signature': signature}})}\n\n"
             thinking_text = block.get("thinking", "")
             if thinking_text:
                 yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': i, 'delta': {'type': 'thinking_delta', 'thinking': thinking_text}})}\n\n"
@@ -456,6 +573,7 @@ class ServerToolHandler:
         """
         func_name = tool_call.get("function", {}).get("name")
         call_id = tool_call.get("id", "")
+        openai_call_id = tool_call.get("openai_id", call_id)
 
         tool_class = self.server_tools[func_name]
         config = self.configs.get(tool_class.tool_type, {})
@@ -476,7 +594,9 @@ class ServerToolHandler:
         content_blocks = tool_class.build_content_blocks(call_id, args, result)
 
         # Build tool result message for OpenAI
-        tool_result_msg = tool_class.build_tool_result_message(call_id, args, result)
+        tool_result_msg = tool_class.build_tool_result_message(
+            openai_call_id, args, result
+        )
 
         return content_blocks, tool_result_msg
 
@@ -519,8 +639,23 @@ async def _handle_with_server_tools(
            logger.error(
                f"OpenAI API error: {response.status_code} - {response.text}"
            )
+           raw_text = response.text
+           try:
+               if not raw_text:
+                   raw_text = response.content.decode(
+                       "utf-8", errors="replace"
+                   )
+           except Exception:
+               raw_text = ""
+           if not raw_text:
+               raw_text = response.reason_phrase or ""
+           error_message = (raw_text or "").strip()
            error_response = AnthropicErrorResponse(
-               error=AnthropicError(type="api_error", message=response.text)
+               error=AnthropicError(
+                   type="api_error",
+                   message=error_message
+                   or f"Upstream API error ({response.status_code})",
+               )
            )
            return JSONResponse(
                status_code=response.status_code,
@@ -528,9 +663,8 @@ async def _handle_with_server_tools(
            )
 
        completion_data = response.json()
-
-       logger.info(
-           f"[OpenAI Response (Server Tools)] {json.dumps(completion_data, ensure_ascii=False, indent=2)[:2000]}"
+       logger.debug(
+           f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}..."
        )
        from openai.types.chat import ChatCompletion
 
@@ -547,13 +681,9 @@ async def _handle_with_server_tools(
 
        if tool_calls:
            for tc in tool_calls:
-               func_name = tc.function.name
-               func_args = tc.function.arguments
+               func = getattr(tc, "function", None)
+               func_name = func.name if func else ""
                logger.info(f"  Tool call: {func_name}")
-               logger.info(f"  Tool ID: {tc.id}")
-               logger.info(
-                   f"  Arguments: {func_args[:200]}"
-               )  # Log first 200 chars
 
                # Generate Anthropic-style ID for server tools
                is_server = handler.is_server_tool_call(
@@ -564,18 +694,21 @@ async def _handle_with_server_tools(
                )
 
                # Use Anthropic-style ID for server tools, original ID otherwise
-               tool_id = _generate_server_tool_id() if is_server else tc.id
+               client_tool_id = (
+                   _generate_server_tool_id() if is_server else tc.id
+               )
 
                tc_dict = {
-                   "id": tool_id,
+                   "id": client_tool_id,
+                   "openai_id": tc.id,
                    "function": {
                        "name": func_name,
-                       "arguments": tc.function.arguments
-                       if tc.function
-                       else "{}",
+                       "arguments": func.arguments if func else "{}",
                    },
                }
-               logger.info(f"  Is server tool: {is_server}, ID: {tool_id}")
+               logger.info(
+                   f"  Is server tool: {is_server}, ID: {client_tool_id}"
+               )
                if is_server:
                    server_tool_calls.append(tc_dict)
                else:
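The `openai_id` field added here keeps two identifiers per tool call: an Anthropic-style `srvtoolu_*` ID shown to the client for server tools, and the upstream OpenAI ID that must be echoed back in the `tool` role message. A sketch of the bookkeeping with invented IDs:

```python
tc_dict = {
    "id": "srvtoolu_abc123",         # what the Anthropic-side client sees
    "openai_id": "call_openai_123",  # what the OpenAI upstream issued
    "function": {"name": "web_search", "arguments": "{}"},
}

# The OpenAI-side tool result must reference the upstream ID, not the client one.
tool_msg = {
    "role": "tool",
    "tool_call_id": tc_dict.get("openai_id", tc_dict["id"]),
    "content": "...",
}
assert tool_msg["tool_call_id"] == "call_openai_123"
```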
@@ -596,6 +729,9 @@ async def _handle_with_server_tools(
 
                if message_dict.get("usage"):
                    message_dict["usage"]["server_tool_use"] = handler.usage
+               message_dict["usage"] = _normalize_usage(
+                   message_dict.get("usage")
+               )
 
                # Log full response for debugging
                logger.info(
@@ -606,7 +742,9 @@ async def _handle_with_server_tools(
 
                return JSONResponse(content=message_dict)
 
-           return JSONResponse(content=message.model_dump())
+           message_dict = message.model_dump()
+           message_dict["usage"] = _normalize_usage(message_dict.get("usage"))
+           return JSONResponse(content=message_dict)
 
            # Check max_uses limit
            if total_tool_calls >= max_uses:
@@ -631,9 +769,23 @@ async def _handle_with_server_tools(
                accumulated_content.extend(error_blocks)
 
                # Continue with modified messages
+               assistant_tool_calls = []
+               for call in server_tool_calls:
+                   assistant_tool_calls.append(
+                       {
+                           "id": call.get("openai_id", call.get("id", "")),
+                           "type": "function",
+                           "function": {
+                               "name": call.get("function", {}).get("name", ""),
+                               "arguments": call.get("function", {}).get(
+                                   "arguments", "{}"
+                               ),
+                           },
+                       }
+                   )
                messages = params.get("messages", [])
                messages = _add_tool_results_to_messages(
-                   messages, server_tool_calls, handler, is_error=True
+                   messages, assistant_tool_calls, handler, is_error=True
                )
                params["messages"] = messages
                continue
@@ -651,7 +803,7 @@ async def _handle_with_server_tools(
            # Track for assistant message
            assistant_tool_calls.append(
                {
-                   "id": call["id"],
+                   "id": call.get("openai_id", call.get("id", "")),
                    "type": "function",
                    "function": {
                        "name": call["function"]["name"],
@@ -673,17 +825,17 @@ async def _handle_with_server_tools(
                type="timeout_error", message="Request timed out"
            )
        )
-       [line not preserved in the diff source]
+       return JSONResponse(
            status_code=HTTPStatus.GATEWAY_TIMEOUT,
-       [line not preserved in the diff source]
+           content=error_response.model_dump(),
        )
    except httpx.RequestError as e:
        error_response = AnthropicErrorResponse(
            error=AnthropicError(type="connection_error", message=str(e))
        )
-       [line not preserved in the diff source]
+       return JSONResponse(
            status_code=HTTPStatus.BAD_GATEWAY,
-       [line not preserved in the diff source]
+           content=error_response.model_dump(),
        )
 
@@ -709,10 +861,11 @@ def _add_tool_results_to_messages(
    # Add tool results
    if is_error:
        for call in tool_calls:
+           tool_call_id = call.get("openai_id", call.get("id", ""))
            messages.append(
                {
                    "role": "tool",
-                   "tool_call_id": call["id"],
+                   "tool_call_id": tool_call_id,
                    "content": json.dumps(
                        {
                            "error": "max_uses_exceeded",
@@ -759,7 +912,7 @@ async def create_message(
                type="invalid_request_error", message=f"Invalid JSON: {e}"
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
    except Exception as e:
        logger.error(f"Failed to parse request body: {e}")
        error_response = AnthropicErrorResponse(
@@ -775,7 +928,7 @@ async def create_message(
                message="Request body must be a JSON object",
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
 
    model_value = anthropic_params.get("model")
    if not isinstance(model_value, str) or not model_value.strip():
@@ -784,7 +937,7 @@ async def create_message(
                type="invalid_request_error", message="Model must be a non-empty string"
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
 
    messages_value = anthropic_params.get("messages")
    if not isinstance(messages_value, list) or len(messages_value) == 0:
@@ -794,7 +947,7 @@ async def create_message(
                message="Messages must be a non-empty list",
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
 
    max_tokens_value = anthropic_params.get("max_tokens")
    if not isinstance(max_tokens_value, int):
@@ -803,7 +956,7 @@ async def create_message(
                type="invalid_request_error", message="max_tokens is required"
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
 
    # Check for server tools
    tools = anthropic_params.get("tools", [])
@@ -815,7 +968,7 @@ async def create_message(
 
    # Convert Anthropic params to OpenAI params
    openai_params_obj = convert_anthropic_to_openai(
-       anthropic_params,
+       cast(MessageCreateParams, anthropic_params),
        enabled_server_tools=enabled_server_tools if has_server_tools else None,
    )
    openai_params: dict[str, Any] = dict(openai_params_obj)  # type: ignore
@@ -872,8 +1025,23 @@ async def create_message(
            response = await client.post(url, headers=headers, json=openai_params)
 
            if response.status_code != 200:
+               raw_text = response.text
+               try:
+                   if not raw_text:
+                       raw_text = response.content.decode(
+                           "utf-8", errors="replace"
+                       )
+               except Exception:
+                   raw_text = ""
+               if not raw_text:
+                   raw_text = response.reason_phrase or ""
+               error_message = (raw_text or "").strip()
                error_response = AnthropicErrorResponse(
-                   error=AnthropicError(type="api_error", message=response.text)
+                   error=AnthropicError(
+                       type="api_error",
+                       message=error_message
+                       or f"Upstream API error ({response.status_code})",
+                   )
                )
                return JSONResponse(
                    status_code=response.status_code,
@@ -881,60 +1049,23 @@ async def create_message(
                )
 
            openai_completion = response.json()
-
-           logger.info(
-               f"[OpenAI Raw Response] {json.dumps(openai_completion, ensure_ascii=False, indent=2)[:2000]}"
+           logger.debug(
+               f"[OpenAI Response] {json.dumps(openai_completion, ensure_ascii=False, indent=2)}"
            )
 
-           # Log response details
-           if openai_completion.get("choices"):
-               choice = openai_completion["choices"][0]
-               message = choice.get("message", {})
-               finish_reason = choice.get("finish_reason")
-               content_preview = (
-                   message.get("content", "")[:100]
-                   if message.get("content")
-                   else ""
-               )
-               tool_calls_count = (
-                   len(message.get("tool_calls", []))
-                   if message.get("tool_calls")
-                   else 0
-               )
-               logger.info(
-                   f"[OpenAI Response Details] finish_reason={finish_reason}, "
-                   f"content_length={len(message.get('content', ''))}, "
-                   f"tool_calls={tool_calls_count}, "
-                   f"content_preview={content_preview[:50]!r}"
-               )
-
            from openai.types.chat import ChatCompletion
 
            completion = ChatCompletion.model_validate(openai_completion)
            anthropic_message = convert_openai_to_anthropic(completion, model)
 
            anthropic_response = anthropic_message.model_dump()
-
-           logger.info(
-               f"[Anthropic Converted Response] {json.dumps(anthropic_response, ensure_ascii=False, indent=2)[:2000]}"
+           anthropic_response["usage"] = _normalize_usage(
+               anthropic_response.get("usage")
            )
-
-
-           content_blocks = anthropic_response.get("content", [])
-           stop_reason = anthropic_response.get("stop_reason")
-           usage = anthropic_response.get("usage", {})
-           logger.info(
-               f"[Anthropic Response Details] stop_reason={stop_reason}, "
-               f"content_blocks={len(content_blocks)}, "
-               f"input_tokens={usage.get('input_tokens')}, "
-               f"output_tokens={usage.get('output_tokens')}"
+           logger.debug(
+               f"[Anthropic Response] {json.dumps(anthropic_response, ensure_ascii=False, indent=2)}"
            )
 
-           # Log content block types
-           if content_blocks:
-               block_types = [block.get("type") for block in content_blocks]
-               logger.info(f"[Anthropic Content Blocks] types={block_types}")
-
            return JSONResponse(content=anthropic_response)
 
@@ -943,17 +1074,17 @@ async def create_message(
                type="timeout_error", message="Request timed out"
            )
        )
-       [line not preserved in the diff source]
+       return JSONResponse(
            status_code=HTTPStatus.GATEWAY_TIMEOUT,
-       [line not preserved in the diff source]
+           content=error_response.model_dump(),
        )
    except httpx.RequestError as e:
        error_response = AnthropicErrorResponse(
            error=AnthropicError(type="connection_error", message=str(e))
        )
-       [line not preserved in the diff source]
+       return JSONResponse(
            status_code=HTTPStatus.BAD_GATEWAY,
-       [line not preserved in the diff source]
+           content=error_response.model_dump(),
        )
 
@@ -1007,7 +1138,7 @@ async def count_tokens(
                type="invalid_request_error", message=f"Invalid JSON: {e}"
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
    except Exception as e:
        error_response = AnthropicErrorResponse(
            error=AnthropicError(type="invalid_request_error", message=str(e))
@@ -1022,7 +1153,7 @@ async def count_tokens(
                message="Request body must be a JSON object",
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
 
    messages = body_json.get("messages", [])
    if not isinstance(messages, list):
@@ -1031,7 +1162,7 @@ async def count_tokens(
                type="invalid_request_error", message="messages must be a list"
            )
        )
-       return JSONResponse(status_code=
+       return JSONResponse(status_code=400, content=error_response.model_dump())
 
    model = body_json.get("model", "")
    system = body_json.get("system")
@@ -1039,7 +1170,7 @@ async def count_tokens(
 
    try:
        # Use tiktoken for token counting
-       import tiktoken
+       import tiktoken  # type: ignore[import-not-found]
 
        # Map model names to tiktoken encoding
        # Claude models don't have direct tiktoken encodings, so we use cl100k_base as approximation
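cl100k_base is an OpenAI tokenizer, so counts for Claude models are approximations, which is all this endpoint promises. A quick check (requires `pip install tiktoken`):

```python
import tiktoken

encoding = tiktoken.get_encoding("cl100k_base")
print(len(encoding.encode("Hello, world!")))  # 4 tokens under cl100k_base
```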
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/__init__.py
RENAMED
|
File without changes
|
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/__main__.py
RENAMED
|
File without changes
|
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/config.py
RENAMED
|
File without changes
|
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/daemon.py
RENAMED
|
File without changes
|
|
File without changes
|
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/main.py
RENAMED
|
File without changes
|
|
File without changes
|
{local_openai2anthropic-0.3.1 → local_openai2anthropic-0.3.3}/src/local_openai2anthropic/protocol.py
RENAMED
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|