local-openai2anthropic 0.2.7__py3-none-any.whl → 0.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- local_openai2anthropic/converter.py
+++ local_openai2anthropic/converter.py
@@ -92,7 +92,9 @@ def convert_anthropic_to_openai(
         converted_messages = _convert_anthropic_message_to_openai(msg)
         openai_messages.extend(converted_messages)
         msg_count += 1
-    logger.debug(f"Converted {msg_count} messages, total OpenAI messages: {len(openai_messages)}")
+    logger.debug(
+        f"Converted {msg_count} messages, total OpenAI messages: {len(openai_messages)}"
+    )
 
     # Build OpenAI params
     params: dict[str, Any] = {
@@ -139,17 +141,21 @@ def convert_anthropic_to_openai(
         openai_tools.append(openai_tool)
 
     # Add server tools as OpenAI function tools
-    for tool_class in (enabled_server_tools or []):
+    for tool_class in enabled_server_tools or []:
         if tool_class.tool_type in server_tools_config:
             config = server_tools_config[tool_class.tool_type]
             openai_tools.append(tool_class.to_openai_tool(config))
 
     if openai_tools:
        params["tools"] = openai_tools
-
+
     # Convert tool_choice
     if tool_choice:
-        tc = tool_choice if isinstance(tool_choice, dict) else tool_choice.model_dump()
+        tc = (
+            tool_choice
+            if isinstance(tool_choice, dict)
+            else tool_choice.model_dump()
+        )
         tc_type = tc.get("type")
         if tc_type == "auto":
             params["tool_choice"] = "auto"
@@ -162,7 +168,7 @@ def convert_anthropic_to_openai(
             }
     else:
         params["tool_choice"] = "auto"
-
+
     # Handle thinking parameter
     # vLLM/SGLang use chat_template_kwargs.thinking to toggle thinking mode
     # Some models use "thinking", others use "enable_thinking", so we include both
@@ -181,7 +187,7 @@ def convert_anthropic_to_openai(
             logger.debug(
                 "thinking.budget_tokens (%s) is accepted but not supported by "
                 "vLLM/SGLang. Using default thinking configuration.",
-                budget_tokens
+                budget_tokens,
             )
     else:
         # Default to disabled thinking mode if not explicitly enabled
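
The comments above describe the whole thinking story: vLLM and SGLang toggle reasoning through chat_template_kwargs, the key name varies by model, and budget_tokens has no upstream equivalent. A minimal sketch of the payload this implies (the exact dict the converter builds sits between the hunks, so treat the helper below as an illustration, not the package's code):

    def thinking_kwargs(thinking: dict | None) -> dict:
        """Sketch: build the extra params for vLLM/SGLang thinking mode."""
        enabled = bool(thinking) and thinking.get("type") == "enabled"
        # Send both spellings, since some models read "thinking" and
        # others read "enable_thinking".
        return {
            "chat_template_kwargs": {
                "thinking": enabled,
                "enable_thinking": enabled,
            }
        }

    # budget_tokens is accepted but ignored; vLLM/SGLang expose no budget knob.
    print(thinking_kwargs({"type": "enabled", "budget_tokens": 4096}))
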
@@ -208,32 +214,32 @@ def _convert_anthropic_message_to_openai(
 ) -> list[dict[str, Any]]:
     """
     Convert a single Anthropic message to OpenAI format.
-
-    Returns a list of messages because tool_results need to be
+
+    Returns a list of messages because tool_results need to be
     separate tool messages in OpenAI format.
     """
     role = msg.get("role", "user")
     content = msg.get("content", "")
-
+
     if isinstance(content, str):
         return [{"role": role, "content": content}]
-
+
     # Handle list of content blocks
     openai_content: list[dict[str, Any]] = []
     tool_calls: list[dict[str, Any]] = []
     tool_call_results: list[dict[str, Any]] = []
-
+
     for block in content:
         if isinstance(block, str):
             openai_content.append({"type": "text", "text": block})
             continue
-
+
         block_type = block.get("type") if isinstance(block, dict) else block.type
-
+
         if block_type == "text":
             text = block.get("text") if isinstance(block, dict) else block.text
             openai_content.append({"type": "text", "text": text})
-
+
         elif block_type == "image":
             # Convert image to image_url format
             source = block.get("source") if isinstance(block, dict) else block.source
@@ -246,11 +252,13 @@ def _convert_anthropic_message_to_openai(
                 data = source.data
             # Build data URL
             url = f"data:{media_type};base64,{data}"
-            openai_content.append({
-                "type": "image_url",
-                "image_url": {"url": url},
-            })
-
+            openai_content.append(
+                {
+                    "type": "image_url",
+                    "image_url": {"url": url},
+                }
+            )
+
         elif block_type == "tool_use":
             # Convert to function call
             if isinstance(block, dict):
@@ -261,16 +269,20 @@ def _convert_anthropic_message_to_openai(
                 tool_id = block.id
                 name = block.name
                 input_data = block.input
-
-            tool_calls.append({
-                "id": tool_id,
-                "type": "function",
-                "function": {
-                    "name": name,
-                    "arguments": json.dumps(input_data) if isinstance(input_data, dict) else str(input_data),
-                },
-            })
-
+
+            tool_calls.append(
+                {
+                    "id": tool_id,
+                    "type": "function",
+                    "function": {
+                        "name": name,
+                        "arguments": json.dumps(input_data)
+                        if isinstance(input_data, dict)
+                        else str(input_data),
+                    },
+                }
+            )
+
         elif block_type == "tool_result":
             # Tool results need to be separate tool messages
             if isinstance(block, dict):
@@ -281,7 +293,7 @@ def _convert_anthropic_message_to_openai(
                 tool_use_id = block.tool_use_id
                 result_content = block.content
                 is_error = getattr(block, "is_error", False)
-
+
             # Handle content that might be a list or string
             if isinstance(result_content, list):
                 # Extract text from content blocks
@@ -298,7 +310,7 @@ def _convert_anthropic_message_to_openai(
                 result_text = "\n".join(text_parts)
             else:
                 result_text = str(result_content)
-
+
             tool_msg: dict[str, Any] = {
                 "role": "tool",
                 "tool_call_id": tool_use_id,
@@ -306,28 +318,28 @@ def _convert_anthropic_message_to_openai(
             }
             # Note: is_error is not directly supported in OpenAI API
             # but we could add it to content if needed
-
+
             tool_call_results.append(tool_msg)
-
+
     # Build primary message
     messages: list[dict[str, Any]] = []
     # SGLang requires content field to be present, default to empty string
     primary_msg: dict[str, Any] = {"role": role, "content": ""}
-
+
     if openai_content:
         if len(openai_content) == 1 and openai_content[0]["type"] == "text":
             primary_msg["content"] = openai_content[0]["text"]
         else:
             primary_msg["content"] = openai_content
-
+
     if tool_calls:
         primary_msg["tool_calls"] = tool_calls
-
+
     messages.append(primary_msg)
-
+
     # Add tool result messages separately
     messages.extend(tool_call_results)
-
+
     return messages
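
A worked example of why this function returns a list rather than a single message: a tool_result block cannot live inside an OpenAI user message, so it is split off into its own "tool" role message while the remaining blocks stay on the primary message. The shapes below follow the hunks above; the concrete values are illustrative:

    # One Anthropic user turn carrying a tool result plus follow-up text ...
    anthropic_msg = {
        "role": "user",
        "content": [
            {"type": "tool_result", "tool_use_id": "toolu_01", "content": "72F, sunny"},
            {"type": "text", "text": "Now summarize that."},
        ],
    }

    # ... becomes two OpenAI messages: the primary message first, then the
    # tool message appended from tool_call_results.
    expected = [
        {"role": "user", "content": "Now summarize that."},
        {"role": "tool", "tool_call_id": "toolu_01", "content": "72F, sunny"},
    ]
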
 
 
@@ -353,24 +365,24 @@ def convert_openai_to_anthropic(
 ) -> Message:
     """
     Convert OpenAI ChatCompletion to Anthropic Message.
-
+
     Args:
         completion: OpenAI chat completion response
         model: Model name
-
+
     Returns:
         Anthropic Message response
     """
     from anthropic.types.beta import BetaThinkingBlock
-
+
     choice = completion.choices[0]
     message = choice.message
-
+
     # Convert content blocks
     content: list[ContentBlock] = []
-
+
     # Add reasoning content (thinking) first if present
-    reasoning_content = getattr(message, 'reasoning_content', None)
+    reasoning_content = getattr(message, "reasoning_content", None)
     if reasoning_content:
         content.append(
             BetaThinkingBlock(
@@ -379,7 +391,7 @@ def convert_openai_to_anthropic(
                 signature="",  # Signature not available from OpenAI format
             )
         )
-
+
     # Add text content if present
     if message.content:
         if isinstance(message.content, str):
@@ -388,16 +400,20 @@ def convert_openai_to_anthropic(
             for part in message.content:
                 if part.type == "text":
                     content.append(TextBlock(type="text", text=part.text))
-
+
     # Convert tool calls
     if message.tool_calls:
         for tc in message.tool_calls:
+            # Handle case where function might be None
+            if not tc.function:
+                continue
+
             tool_input: dict[str, Any] = {}
             try:
                 tool_input = json.loads(tc.function.arguments)
             except json.JSONDecodeError:
                 tool_input = {"raw": tc.function.arguments}
-
+
             content.append(
                 ToolUseBlock(
                     type="tool_use",
@@ -406,7 +422,7 @@ def convert_openai_to_anthropic(
                     input=tool_input,
                 )
             )
-
+
     # Determine stop reason
     stop_reason_map = {
         "stop": "end_turn",
@@ -414,18 +430,24 @@ def convert_openai_to_anthropic(
         "tool_calls": "tool_use",
         "content_filter": "end_turn",
     }
-    anthropic_stop_reason = stop_reason_map.get(choice.finish_reason or "stop", "end_turn")
-
+    anthropic_stop_reason = stop_reason_map.get(
+        choice.finish_reason or "stop", "end_turn"
+    )
+
     # Build usage dict with cache support (if available from upstream)
     usage_dict = None
     if completion.usage:
         usage_dict = {
             "input_tokens": completion.usage.prompt_tokens,
             "output_tokens": completion.usage.completion_tokens,
-            "cache_creation_input_tokens": getattr(completion.usage, "cache_creation_input_tokens", None),
-            "cache_read_input_tokens": getattr(completion.usage, "cache_read_input_tokens", None),
+            "cache_creation_input_tokens": getattr(
+                completion.usage, "cache_creation_input_tokens", None
+            ),
+            "cache_read_input_tokens": getattr(
+                completion.usage, "cache_read_input_tokens", None
+            ),
         }
-
+
     # Build message dict to avoid Pydantic validation issues
     message_dict = {
         "id": completion.id,
@@ -437,5 +459,5 @@ def convert_openai_to_anthropic(
         "stop_sequence": None,
         "usage": usage_dict,
     }
-
+
     return Message.model_validate(message_dict)
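
The stop-reason translation above is a pure lookup; pulled out on its own it behaves like this (values taken directly from the map in the diff):

    stop_reason_map = {
        "stop": "end_turn",
        "length": "max_tokens",
        "tool_calls": "tool_use",
        "content_filter": "end_turn",
    }

    for finish_reason in ("stop", "length", "tool_calls", "content_filter", None):
        # A missing finish_reason falls back to "stop", hence "end_turn".
        print(finish_reason, "->", stop_reason_map.get(finish_reason or "stop", "end_turn"))
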
--- local_openai2anthropic/router.py
+++ local_openai2anthropic/router.py
@@ -47,7 +47,7 @@ def _generate_server_tool_id() -> str:
     """Generate Anthropic-style server tool use ID (srvtoolu_...)."""
     # Generate 24 random alphanumeric characters
     chars = string.ascii_lowercase + string.digits
-    random_part = ''.join(secrets.choice(chars) for _ in range(24))
+    random_part = "".join(secrets.choice(chars) for _ in range(24))
     return f"srvtoolu_{random_part}"
 
 
@@ -62,12 +62,16 @@ async def _stream_response(
     Stream response from OpenAI and convert to Anthropic format.
     """
     try:
-        async with client.stream("POST", url, headers=headers, json=json_data) as response:
+        async with client.stream(
+            "POST", url, headers=headers, json=json_data
+        ) as response:
             if response.status_code != 200:
                 error_body = await response.aread()
                 try:
                     error_json = json.loads(error_body.decode())
-                    error_msg = error_json.get("error", {}).get("message", error_body.decode())
+                    error_msg = error_json.get("error", {}).get(
+                        "message", error_body.decode()
+                    )
                 except json.JSONDecodeError:
                     error_msg = error_body.decode()
 
@@ -98,7 +102,9 @@ async def _stream_response(
 
             try:
                 chunk = json.loads(data)
-                logger.debug(f"[OpenAI Stream Chunk] {json.dumps(chunk, ensure_ascii=False)}")
+                logger.debug(
+                    f"[OpenAI Stream Chunk] {json.dumps(chunk, ensure_ascii=False)}"
+                )
             except json.JSONDecodeError:
                 continue
 
@@ -126,7 +132,9 @@ async def _stream_response(
                         },
                     },
                 }
-                logger.debug(f"[Anthropic Stream Event] message_start: {json.dumps(start_event, ensure_ascii=False)}")
+                logger.debug(
+                    f"[Anthropic Stream Event] message_start: {json.dumps(start_event, ensure_ascii=False)}"
+                )
                 yield f"event: message_start\ndata: {json.dumps(start_event)}\n\n"
                 first_chunk = False
                 continue
@@ -139,9 +147,28 @@ async def _stream_response(
                     yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': content_block_index})}\n\n"
                     content_block_started = False
 
-                stop_reason_map = {"stop": "end_turn", "length": "max_tokens", "tool_calls": "tool_use"}
-                delta_event = {'type': 'message_delta', 'delta': {'stop_reason': stop_reason_map.get(finish_reason or 'stop', 'end_turn')}, 'usage': {'input_tokens': usage.get('prompt_tokens', 0), 'output_tokens': usage.get('completion_tokens', 0), 'cache_creation_input_tokens': None, 'cache_read_input_tokens': None}}
-                logger.debug(f"[Anthropic Stream Event] message_delta: {json.dumps(delta_event, ensure_ascii=False)}")
+                stop_reason_map = {
+                    "stop": "end_turn",
+                    "length": "max_tokens",
+                    "tool_calls": "tool_use",
+                }
+                delta_event = {
+                    "type": "message_delta",
+                    "delta": {
+                        "stop_reason": stop_reason_map.get(
+                            finish_reason or "stop", "end_turn"
+                        )
+                    },
+                    "usage": {
+                        "input_tokens": usage.get("prompt_tokens", 0),
+                        "output_tokens": usage.get("completion_tokens", 0),
+                        "cache_creation_input_tokens": None,
+                        "cache_read_input_tokens": None,
+                    },
+                }
+                logger.debug(
+                    f"[Anthropic Stream Event] message_delta: {json.dumps(delta_event, ensure_ascii=False)}"
+                )
                 yield f"event: message_delta\ndata: {json.dumps(delta_event)}\n\n"
                 continue
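
On the wire, each of these events is a complete SSE record: an `event:` line, a `data:` line holding the JSON payload, and a blank line as terminator. A self-contained illustration of the message_delta frame built above (the values are made up):

    import json

    delta_event = {
        "type": "message_delta",
        "delta": {"stop_reason": "end_turn"},
        "usage": {
            "input_tokens": 12,
            "output_tokens": 34,
            "cache_creation_input_tokens": None,
            "cache_read_input_tokens": None,
        },
    }
    # The trailing "\n\n" closes the SSE record.
    print(f"event: message_delta\ndata: {json.dumps(delta_event)}\n\n", end="")
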
 
@@ -156,44 +183,75 @@ async def _stream_response(
                 if delta.get("reasoning_content"):
                     reasoning = delta["reasoning_content"]
                     # Start thinking content block if not already started
-                    if not content_block_started or current_block_type != 'thinking':
+                    if not content_block_started or current_block_type != "thinking":
                         # Close previous block if exists
                         if content_block_started:
-                            stop_block = {'type': 'content_block_stop', 'index': content_block_index}
-                            logger.debug(f"[Anthropic Stream Event] content_block_stop ({current_block_type}): {json.dumps(stop_block, ensure_ascii=False)}")
+                            stop_block = {
+                                "type": "content_block_stop",
+                                "index": content_block_index,
+                            }
+                            logger.debug(
+                                f"[Anthropic Stream Event] content_block_stop ({current_block_type}): {json.dumps(stop_block, ensure_ascii=False)}"
+                            )
                             yield f"event: content_block_stop\ndata: {json.dumps(stop_block)}\n\n"
                             content_block_index += 1
-                        start_block = {'type': 'content_block_start', 'index': content_block_index, 'content_block': {'type': 'thinking', 'thinking': ''}}
-                        logger.debug(f"[Anthropic Stream Event] content_block_start (thinking): {json.dumps(start_block, ensure_ascii=False)}")
+                        start_block = {
+                            "type": "content_block_start",
+                            "index": content_block_index,
+                            "content_block": {"type": "thinking", "thinking": ""},
+                        }
+                        logger.debug(
+                            f"[Anthropic Stream Event] content_block_start (thinking): {json.dumps(start_block, ensure_ascii=False)}"
+                        )
                         yield f"event: content_block_start\ndata: {json.dumps(start_block)}\n\n"
                         content_block_started = True
-                        current_block_type = 'thinking'
+                        current_block_type = "thinking"
 
-                    delta_block = {'type': 'content_block_delta', 'index': content_block_index, 'delta': {'type': 'thinking_delta', 'thinking': reasoning}}
+                    delta_block = {
+                        "type": "content_block_delta",
+                        "index": content_block_index,
+                        "delta": {"type": "thinking_delta", "thinking": reasoning},
+                    }
                     yield f"event: content_block_delta\ndata: {json.dumps(delta_block)}\n\n"
                     continue
 
                 # Handle content
                 if delta.get("content"):
-                    if not content_block_started or current_block_type != 'text':
+                    if not content_block_started or current_block_type != "text":
                         # Close previous block if exists
                         if content_block_started:
-                            stop_block = {'type': 'content_block_stop', 'index': content_block_index}
-                            logger.debug(f"[Anthropic Stream Event] content_block_stop ({current_block_type}): {json.dumps(stop_block, ensure_ascii=False)}")
+                            stop_block = {
+                                "type": "content_block_stop",
+                                "index": content_block_index,
+                            }
+                            logger.debug(
+                                f"[Anthropic Stream Event] content_block_stop ({current_block_type}): {json.dumps(stop_block, ensure_ascii=False)}"
+                            )
                             yield f"event: content_block_stop\ndata: {json.dumps(stop_block)}\n\n"
                             content_block_index += 1
-                        start_block = {'type': 'content_block_start', 'index': content_block_index, 'content_block': {'type': 'text', 'text': ''}}
-                        logger.debug(f"[Anthropic Stream Event] content_block_start (text): {json.dumps(start_block, ensure_ascii=False)}")
+                        start_block = {
+                            "type": "content_block_start",
+                            "index": content_block_index,
+                            "content_block": {"type": "text", "text": ""},
+                        }
+                        logger.debug(
+                            f"[Anthropic Stream Event] content_block_start (text): {json.dumps(start_block, ensure_ascii=False)}"
+                        )
                         yield f"event: content_block_start\ndata: {json.dumps(start_block)}\n\n"
                         content_block_started = True
-                        current_block_type = 'text'
+                        current_block_type = "text"
 
-                    delta_block = {'type': 'content_block_delta', 'index': content_block_index, 'delta': {'type': 'text_delta', 'text': delta['content']}}
+                    delta_block = {
+                        "type": "content_block_delta",
+                        "index": content_block_index,
+                        "delta": {"type": "text_delta", "text": delta["content"]},
+                    }
                     yield f"event: content_block_delta\ndata: {json.dumps(delta_block)}\n\n"
 
                 # Handle tool calls
-                if delta.get("tool_calls"):
-                    tool_call = delta["tool_calls"][0]
+                tool_calls = delta.get("tool_calls", [])
+                if tool_calls:
+                    tool_call = tool_calls[0]
 
                     if tool_call.get("id"):
                         if content_block_started:
@@ -201,28 +259,36 @@ async def _stream_response(
                            content_block_started = False
                        content_block_index += 1
 
-                        func = tool_call.get('function') or {}
+                        func = tool_call.get("function") or {}
                         yield f"event: content_block_start\ndata: {json.dumps({'type': 'content_block_start', 'index': content_block_index, 'content_block': {'type': 'tool_use', 'id': tool_call['id'], 'name': func.get('name', ''), 'input': {}}})}\n\n"
                         content_block_started = True
-                        current_block_type = 'tool_use'
+                        current_block_type = "tool_use"
 
-                    elif (tool_call.get('function') or {}).get("arguments"):
-                        args = (tool_call.get('function') or {}).get("arguments", "")
+                    elif (tool_call.get("function") or {}).get("arguments"):
+                        args = (tool_call.get("function") or {}).get("arguments", "")
                         yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': content_block_index, 'delta': {'type': 'input_json_delta', 'partial_json': args}})}\n\n"
 
         # Close final content block
         if content_block_started:
-            stop_block = {'type': 'content_block_stop', 'index': content_block_index}
-            logger.debug(f"[Anthropic Stream Event] content_block_stop (final): {json.dumps(stop_block, ensure_ascii=False)}")
+            stop_block = {
+                "type": "content_block_stop",
+                "index": content_block_index,
+            }
+            logger.debug(
+                f"[Anthropic Stream Event] content_block_stop (final): {json.dumps(stop_block, ensure_ascii=False)}"
+            )
             yield f"event: content_block_stop\ndata: {json.dumps(stop_block)}\n\n"
 
         # Message stop
-        stop_event = {'type': 'message_stop'}
-        logger.debug(f"[Anthropic Stream Event] message_stop: {json.dumps(stop_event, ensure_ascii=False)}")
+        stop_event = {"type": "message_stop"}
+        logger.debug(
+            f"[Anthropic Stream Event] message_stop: {json.dumps(stop_event, ensure_ascii=False)}"
+        )
         yield f"event: message_stop\ndata: {json.dumps(stop_event)}\n\n"
 
     except Exception as e:
         import traceback
+
         error_msg = f"{str(e)}\n{traceback.format_exc()}"
         logger.error(f"Stream error: {error_msg}")
         error_event = AnthropicErrorResponse(
@@ -237,17 +303,21 @@ async def _convert_result_to_stream(
 ) -> AsyncGenerator[str, None]:
     """Convert a JSONResponse to streaming SSE format."""
     import time
-
+
     body = json.loads(result.body)
     message_id = body.get("id", f"msg_{int(time.time() * 1000)}")
     content = body.get("content", [])
     usage = body.get("usage", {})
     stop_reason = body.get("stop_reason", "end_turn")
-
+
     # Map stop_reason
-    stop_reason_map = {"end_turn": "stop", "max_tokens": "length", "tool_use": "tool_calls"}
+    stop_reason_map = {
+        "end_turn": "stop",
+        "max_tokens": "length",
+        "tool_use": "tool_calls",
+    }
     openai_stop_reason = stop_reason_map.get(stop_reason, "stop")
-
+
     # 1. message_start event
     start_event = {
         "type": "message_start",
@@ -268,7 +338,7 @@ async def _convert_result_to_stream(
         },
     }
     yield f"event: message_start\ndata: {json.dumps(start_event)}\n\n"
-
+
     # 2. Process content blocks
     for i, block in enumerate(content):
         block_type = block.get("type")
@@ -305,7 +375,7 @@ async def _convert_result_to_stream(
             if thinking_text:
                 yield f"event: content_block_delta\ndata: {json.dumps({'type': 'content_block_delta', 'index': i, 'delta': {'type': 'thinking_delta', 'thinking': thinking_text}})}\n\n"
             yield f"event: content_block_stop\ndata: {json.dumps({'type': 'content_block_stop', 'index': i})}\n\n"
-
+
     # 3. message_delta with final usage
     delta_event = {
         "type": "message_delta",
@@ -319,7 +389,7 @@ async def _convert_result_to_stream(
         },
     }
     yield f"event: message_delta\ndata: {json.dumps(delta_event)}\n\n"
-
+
     # 4. message_stop
     yield f"event: message_stop\ndata: {json.dumps({'type': 'message_stop'})}\n\n"
 
@@ -406,12 +476,16 @@ async def _handle_with_server_tools(
     async with httpx.AsyncClient(timeout=settings.request_timeout) as client:
         try:
             # Log full request for debugging
-            logger.debug(f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}")
-
+            logger.debug(
+                f"Request body: {json.dumps(params, indent=2, default=str)[:3000]}"
+            )
+
             response = await client.post(url, headers=headers, json=params)
 
             if response.status_code != 200:
-                logger.error(f"OpenAI API error: {response.status_code} - {response.text}")
+                logger.error(
+                    f"OpenAI API error: {response.status_code} - {response.text}"
+                )
                 error_response = AnthropicErrorResponse(
                     error=AnthropicError(type="api_error", message=response.text)
                 )
@@ -421,36 +495,45 @@ async def _handle_with_server_tools(
                 )
 
             completion_data = response.json()
-            logger.debug(f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}...")
+            logger.debug(
+                f"OpenAI response: {json.dumps(completion_data, indent=2)[:500]}..."
+            )
             from openai.types.chat import ChatCompletion
+
             completion = ChatCompletion.model_validate(completion_data)
 
             # Check for server tool calls
             server_tool_calls = []
             other_tool_calls = []
-
+
             tool_calls = completion.choices[0].message.tool_calls
-            logger.info(f"Model returned tool_calls: {len(tool_calls) if tool_calls else 0}")
+            logger.info(
+                f"Model returned tool_calls: {len(tool_calls) if tool_calls else 0}"
+            )
 
             if tool_calls:
                 for tc in tool_calls:
                     func_name = tc.function.name if tc.function else ""
                     logger.info(f"  Tool call: {func_name}")
-
+
                     # Generate Anthropic-style ID for server tools
-                    is_server = handler.is_server_tool_call({
-                        "id": tc.id,
-                        "function": {"name": func_name, "arguments": ""},
-                    })
-
+                    is_server = handler.is_server_tool_call(
+                        {
+                            "id": tc.id,
+                            "function": {"name": func_name, "arguments": ""},
+                        }
+                    )
+
                     # Use Anthropic-style ID for server tools, original ID otherwise
                     tool_id = _generate_server_tool_id() if is_server else tc.id
-
+
                     tc_dict = {
                         "id": tool_id,
                         "function": {
                             "name": func_name,
-                            "arguments": tc.function.arguments if tc.function else "{}",
+                            "arguments": tc.function.arguments
+                            if tc.function
+                            else "{}",
                        },
                    }
                    logger.info(f"  Is server tool: {is_server}, ID: {tool_id}")
@@ -460,19 +543,25 @@ async def _handle_with_server_tools(
                         other_tool_calls.append(tc)
 
             # No server tool calls - we're done
-            logger.info(f"Server tool calls: {len(server_tool_calls)}, Other: {len(other_tool_calls)}")
+            logger.info(
+                f"Server tool calls: {len(server_tool_calls)}, Other: {len(other_tool_calls)}"
+            )
             if not server_tool_calls:
                 message = convert_openai_to_anthropic(completion, model)
 
                 if accumulated_content:
                     message_dict = message.model_dump()
-                    message_dict["content"] = accumulated_content + message_dict.get("content", [])
-
+                    message_dict["content"] = (
+                        accumulated_content + message_dict.get("content", [])
+                    )
+
                     if message_dict.get("usage"):
                         message_dict["usage"]["server_tool_use"] = handler.usage
-
+
                     # Log full response for debugging
-                    logger.info(f"Response content blocks: {json.dumps(message_dict.get('content', []), ensure_ascii=False)[:1000]}")
+                    logger.info(
+                        f"Response content blocks: {json.dumps(message_dict.get('content', []), ensure_ascii=False)[:1000]}"
+                    )
                     logger.info(f"Response usage: {message_dict.get('usage')}")
                     logger.info(f"Server tool use count: {handler.usage}")
 
@@ -489,6 +578,7 @@ async def _handle_with_server_tools(
                     tool_class = handler.server_tools.get(func_name)
                     if tool_class:
                         from local_openai2anthropic.server_tools import ToolResult
+
                         error_result = ToolResult(
                             success=False,
                             content=[],
@@ -520,14 +610,16 @@ async def _handle_with_server_tools(
                 accumulated_content.extend(content_blocks)
 
                 # Track for assistant message
-                assistant_tool_calls.append({
-                    "id": call["id"],
-                    "type": "function",
-                    "function": {
-                        "name": call["function"]["name"],
-                        "arguments": call["function"]["arguments"],
-                    },
-                })
+                assistant_tool_calls.append(
+                    {
+                        "id": call["id"],
+                        "type": "function",
+                        "function": {
+                            "name": call["function"]["name"],
+                            "arguments": call["function"]["arguments"],
+                        },
+                    }
+                )
                 tool_results.append(tool_result)
 
             # Add to messages for next iteration
@@ -538,7 +630,9 @@ async def _handle_with_server_tools(
 
         except httpx.TimeoutException:
             error_response = AnthropicErrorResponse(
-                error=AnthropicError(type="timeout_error", message="Request timed out")
+                error=AnthropicError(
+                    type="timeout_error", message="Request timed out"
+                )
             )
             raise HTTPException(
                 status_code=HTTPStatus.GATEWAY_TIMEOUT,
@@ -576,14 +670,18 @@ def _add_tool_results_to_messages(
     # Add tool results
     if is_error:
         for call in tool_calls:
-            messages.append({
-                "role": "tool",
-                "tool_call_id": call["id"],
-                "content": json.dumps({
-                    "error": "max_uses_exceeded",
-                    "message": "Maximum tool uses exceeded.",
-                }),
-            })
+            messages.append(
+                {
+                    "role": "tool",
+                    "tool_call_id": call["id"],
+                    "content": json.dumps(
+                        {
+                            "error": "max_uses_exceeded",
+                            "message": "Maximum tool uses exceeded.",
+                        }
+                    ),
+                }
+            )
     elif tool_results:
         messages.extend(tool_results)
 
@@ -611,12 +709,16 @@ async def create_message(
     try:
         body_bytes = await request.body()
         body_json = json.loads(body_bytes.decode("utf-8"))
-        logger.debug(f"[Anthropic Request] {json.dumps(body_json, ensure_ascii=False, indent=2)}")
+        logger.debug(
+            f"[Anthropic Request] {json.dumps(body_json, ensure_ascii=False, indent=2)}"
+        )
         anthropic_params = body_json
     except json.JSONDecodeError as e:
         logger.error(f"Invalid JSON in request body: {e}")
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message=f"Invalid JSON: {e}")
+            error=AnthropicError(
+                type="invalid_request_error", message=f"Invalid JSON: {e}"
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
     except Exception as e:
@@ -629,28 +731,38 @@ async def create_message(
     # Validate request shape early (avoid making upstream calls for obviously invalid requests)
     if not isinstance(anthropic_params, dict):
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message="Request body must be a JSON object")
+            error=AnthropicError(
+                type="invalid_request_error",
+                message="Request body must be a JSON object",
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
 
     model_value = anthropic_params.get("model")
     if not isinstance(model_value, str) or not model_value.strip():
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message="Model must be a non-empty string")
+            error=AnthropicError(
+                type="invalid_request_error", message="Model must be a non-empty string"
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
 
     messages_value = anthropic_params.get("messages")
     if not isinstance(messages_value, list) or len(messages_value) == 0:
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message="Messages must be a non-empty list")
+            error=AnthropicError(
+                type="invalid_request_error",
+                message="Messages must be a non-empty list",
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
 
     max_tokens_value = anthropic_params.get("max_tokens")
     if not isinstance(max_tokens_value, int):
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message="max_tokens is required")
+            error=AnthropicError(
+                type="invalid_request_error", message="max_tokens is required"
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
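
Each validation branch short-circuits with a 422 before anything is sent upstream. Assuming AnthropicErrorResponse serializes to Anthropic's standard error envelope, the body a client sees looks roughly like this (a sketch, not captured output):

    import json

    error_body = {
        "type": "error",
        "error": {
            "type": "invalid_request_error",
            "message": "max_tokens is required",
        },
    }
    print(json.dumps(error_body, indent=2))
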
 
@@ -668,10 +780,12 @@ async def create_message(
         enabled_server_tools=enabled_server_tools if has_server_tools else None,
     )
     openai_params: dict[str, Any] = dict(openai_params_obj)  # type: ignore
-
+
     # Log converted OpenAI request (remove internal fields)
-    log_params = {k: v for k, v in openai_params.items() if not k.startswith('_')}
-    logger.debug(f"[OpenAI Request] {json.dumps(log_params, ensure_ascii=False, indent=2)}")
+    log_params = {k: v for k, v in openai_params.items() if not k.startswith("_")}
+    logger.debug(
+        f"[OpenAI Request] {json.dumps(log_params, ensure_ascii=False, indent=2)}"
+    )
 
     stream = openai_params.get("stream", False)
     model = openai_params.get("model", "")
@@ -698,7 +812,7 @@ async def create_message(
         result = await _handle_with_server_tools(
             openai_params, url, headers, settings, tool_classes, model
         )
-
+
         # If original request was streaming, convert result to streaming format
         if stream:
             return StreamingResponse(
@@ -728,20 +842,27 @@ async def create_message(
             )
 
         openai_completion = response.json()
-        logger.debug(f"[OpenAI Response] {json.dumps(openai_completion, ensure_ascii=False, indent=2)}")
-
+        logger.debug(
+            f"[OpenAI Response] {json.dumps(openai_completion, ensure_ascii=False, indent=2)}"
+        )
+
         from openai.types.chat import ChatCompletion
+
         completion = ChatCompletion.model_validate(openai_completion)
         anthropic_message = convert_openai_to_anthropic(completion, model)
-
+
         anthropic_response = anthropic_message.model_dump()
-        logger.debug(f"[Anthropic Response] {json.dumps(anthropic_response, ensure_ascii=False, indent=2)}")
+        logger.debug(
+            f"[Anthropic Response] {json.dumps(anthropic_response, ensure_ascii=False, indent=2)}"
+        )
 
         return JSONResponse(content=anthropic_response)
 
     except httpx.TimeoutException:
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="timeout_error", message="Request timed out")
+            error=AnthropicError(
+                type="timeout_error", message="Request timed out"
+            )
        )
        raise HTTPException(
            status_code=HTTPStatus.GATEWAY_TIMEOUT,
@@ -798,10 +919,14 @@ async def count_tokens(
     try:
         body_bytes = await request.body()
         body_json = json.loads(body_bytes.decode("utf-8"))
-        logger.debug(f"[Count Tokens Request] {json.dumps(body_json, ensure_ascii=False, indent=2)}")
+        logger.debug(
+            f"[Count Tokens Request] {json.dumps(body_json, ensure_ascii=False, indent=2)}"
+        )
     except json.JSONDecodeError as e:
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message=f"Invalid JSON: {e}")
+            error=AnthropicError(
+                type="invalid_request_error", message=f"Invalid JSON: {e}"
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
     except Exception as e:
@@ -813,14 +938,19 @@ async def count_tokens(
     # Validate required fields
     if not isinstance(body_json, dict):
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message="Request body must be a JSON object")
+            error=AnthropicError(
+                type="invalid_request_error",
+                message="Request body must be a JSON object",
+            )
         )
         return JSONResponse(status_code=422, content=error_response.model_dump())
 
     messages = body_json.get("messages", [])
     if not isinstance(messages, list):
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="invalid_request_error", message="messages must be a list")
+            error=AnthropicError(
+                type="invalid_request_error", message="messages must be a list"
+            )
        )
        return JSONResponse(status_code=422, content=error_response.model_dump())
 
@@ -831,13 +961,13 @@ async def count_tokens(
     try:
         # Use tiktoken for token counting
         import tiktoken
-
+
         # Map model names to tiktoken encoding
         # Claude models don't have direct tiktoken encodings, so we use cl100k_base as approximation
         encoding = tiktoken.get_encoding("cl100k_base")
-
+
         total_tokens = 0
-
+
         # Count system prompt tokens if present
         if system:
             if isinstance(system, str):
@@ -846,7 +976,7 @@ async def count_tokens(
                 for block in system:
                     if isinstance(block, dict) and block.get("type") == "text":
                         total_tokens += len(encoding.encode(block.get("text", "")))
-
+
         # Count message tokens
         for msg in messages:
             content = msg.get("content", "")
@@ -861,24 +991,24 @@ async def count_tokens(
                         # Images are typically counted as a fixed number of tokens
                         # This is an approximation
                         total_tokens += 85  # Standard approximation for images
-
+
         # Count tool definitions tokens
         if tools:
             for tool in tools:
                 tool_def = tool if isinstance(tool, dict) else tool.model_dump()
                 # Rough approximation for tool definitions
                 total_tokens += len(encoding.encode(json.dumps(tool_def)))
-
+
         logger.debug(f"[Count Tokens Response] input_tokens: {total_tokens}")
-
-        return JSONResponse(content={
-            "input_tokens": total_tokens
-        })
-
+
+        return JSONResponse(content={"input_tokens": total_tokens})
+
     except Exception as e:
         logger.error(f"Token counting error: {e}")
         error_response = AnthropicErrorResponse(
-            error=AnthropicError(type="internal_error", message=f"Failed to count tokens: {str(e)}")
+            error=AnthropicError(
+                type="internal_error", message=f"Failed to count tokens: {str(e)}"
+            )
         )
         return JSONResponse(status_code=500, content=error_response.model_dump())
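
The counting strategy above is an approximation by design: cl100k_base stands in for Claude's tokenizer, images are charged a flat 85 tokens, and tool definitions are encoded as raw JSON. A condensed, runnable sketch of the same approach (requires the tiktoken package; numbers will differ from Anthropic's real counts):

    import json

    import tiktoken

    encoding = tiktoken.get_encoding("cl100k_base")

    messages = [{"role": "user", "content": "Hello, how are you?"}]
    tools = [{"name": "get_weather", "input_schema": {"type": "object"}}]

    total = 0
    for msg in messages:
        content = msg["content"]
        if isinstance(content, str):
            total += len(encoding.encode(content))
    for tool in tools:
        # Same rough heuristic as the endpoint: encode the JSON-serialized tool.
        total += len(encoding.encode(json.dumps(tool)))

    print({"input_tokens": total})
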
 
--- local_openai2anthropic-0.2.7.dist-info/METADATA
+++ local_openai2anthropic-0.2.8.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: local-openai2anthropic
-Version: 0.2.7
+Version: 0.2.8
 Summary: A lightweight proxy server that converts Anthropic Messages API to OpenAI API
 Project-URL: Homepage, https://github.com/dongfangzan/local-openai2anthropic
 Project-URL: Repository, https://github.com/dongfangzan/local-openai2anthropic
--- local_openai2anthropic-0.2.7.dist-info/RECORD
+++ local_openai2anthropic-0.2.8.dist-info/RECORD
@@ -1,19 +1,19 @@
 local_openai2anthropic/__init__.py,sha256=IEn8YcQGsaEaCr04s3hS2AcgsIt5NU5Qa2C8Uwz7RdY,1059
 local_openai2anthropic/__main__.py,sha256=K21u5u7FN8-DbO67TT_XDF0neGqJeFrVNkteRauCRQk,179
 local_openai2anthropic/config.py,sha256=3M5ZAz3uYNMGxaottEBseEOZF-GnVaGuioH9Hpmgnd8,1918
-local_openai2anthropic/converter.py,sha256=d-qYwtv6FIbpKSRsZN4jhnKM4D4k52la-_bpEYPTAS0,15790
+local_openai2anthropic/converter.py,sha256=-cxPlZIPcey4LFIb7250YLlhLntN2uuh1YUpWGCsmfQ,15969
 local_openai2anthropic/daemon.py,sha256=pZnRojGFcuIpR8yLDNjV-b0LJRBVhgRAa-dKeRRse44,10017
 local_openai2anthropic/daemon_runner.py,sha256=rguOH0PgpbjqNsKYei0uCQX8JQOQ1wmtQH1CtW95Dbw,3274
 local_openai2anthropic/main.py,sha256=FK5JBBpzB_T44y3N16lPl1hK4ht4LEQqRKzVmkIjIoo,9866
 local_openai2anthropic/openai_types.py,sha256=jFdCvLwtXYoo5gGRqOhbHQcVaxcsxNnCP_yFPIv7rG4,3823
 local_openai2anthropic/protocol.py,sha256=vUEgxtRPFll6jEtLc4DyxTLCBjrWIEScZXhEqe4uibk,5185
-local_openai2anthropic/router.py,sha256=imzvgduneiniwHroTgeT9d8q4iF5GAuptaVP38sakUg,40226
+local_openai2anthropic/router.py,sha256=SCmwXSh02E8QX_oNCSXC5JvMDDI8Zfso0YBnPIykBss,42853
 local_openai2anthropic/tavily_client.py,sha256=QsBhnyF8BFWPAxB4XtWCCpHCquNL5SW93-zjTTi4Meg,3774
 local_openai2anthropic/server_tools/__init__.py,sha256=QlJfjEta-HOCtLe7NaY_fpbEKv-ZpInjAnfmSqE9tbk,615
 local_openai2anthropic/server_tools/base.py,sha256=pNFsv-jSgxVrkY004AHAcYMNZgVSO8ZOeCzQBUtQ3vU,5633
 local_openai2anthropic/server_tools/web_search.py,sha256=1C7lX_cm-tMaN3MsCjinEZYPJc_Hj4yAxYay9h8Zbvs,6543
-local_openai2anthropic-0.2.7.dist-info/METADATA,sha256=eA34CtgLACHsE4gf4Scuj7yU5IBg_Ys26x8nMnCd_eM,11240
-local_openai2anthropic-0.2.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-local_openai2anthropic-0.2.7.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
-local_openai2anthropic-0.2.7.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
-local_openai2anthropic-0.2.7.dist-info/RECORD,,
+local_openai2anthropic-0.2.8.dist-info/METADATA,sha256=VfHnmWbI52pgbH9kzjimeUJ3r0XVwvdMnTodX8-3nH4,11240
+local_openai2anthropic-0.2.8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+local_openai2anthropic-0.2.8.dist-info/entry_points.txt,sha256=hdc9tSJUNxyNLXcTYye5SuD2K0bEQhxBhGnWTFup6ZM,116
+local_openai2anthropic-0.2.8.dist-info/licenses/LICENSE,sha256=X3_kZy3lJvd_xp8IeyUcIAO2Y367MXZc6aaRx8BYR_s,11369
+local_openai2anthropic-0.2.8.dist-info/RECORD,,