ccproxy-api 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148) hide show
  1. ccproxy/__init__.py +4 -0
  2. ccproxy/__main__.py +7 -0
  3. ccproxy/_version.py +21 -0
  4. ccproxy/adapters/__init__.py +11 -0
  5. ccproxy/adapters/base.py +80 -0
  6. ccproxy/adapters/openai/__init__.py +43 -0
  7. ccproxy/adapters/openai/adapter.py +915 -0
  8. ccproxy/adapters/openai/models.py +412 -0
  9. ccproxy/adapters/openai/streaming.py +449 -0
  10. ccproxy/api/__init__.py +28 -0
  11. ccproxy/api/app.py +225 -0
  12. ccproxy/api/dependencies.py +140 -0
  13. ccproxy/api/middleware/__init__.py +11 -0
  14. ccproxy/api/middleware/auth.py +0 -0
  15. ccproxy/api/middleware/cors.py +55 -0
  16. ccproxy/api/middleware/errors.py +703 -0
  17. ccproxy/api/middleware/headers.py +51 -0
  18. ccproxy/api/middleware/logging.py +175 -0
  19. ccproxy/api/middleware/request_id.py +69 -0
  20. ccproxy/api/middleware/server_header.py +62 -0
  21. ccproxy/api/responses.py +84 -0
  22. ccproxy/api/routes/__init__.py +16 -0
  23. ccproxy/api/routes/claude.py +181 -0
  24. ccproxy/api/routes/health.py +489 -0
  25. ccproxy/api/routes/metrics.py +1033 -0
  26. ccproxy/api/routes/proxy.py +238 -0
  27. ccproxy/auth/__init__.py +75 -0
  28. ccproxy/auth/bearer.py +68 -0
  29. ccproxy/auth/credentials_adapter.py +93 -0
  30. ccproxy/auth/dependencies.py +229 -0
  31. ccproxy/auth/exceptions.py +79 -0
  32. ccproxy/auth/manager.py +102 -0
  33. ccproxy/auth/models.py +118 -0
  34. ccproxy/auth/oauth/__init__.py +26 -0
  35. ccproxy/auth/oauth/models.py +49 -0
  36. ccproxy/auth/oauth/routes.py +396 -0
  37. ccproxy/auth/oauth/storage.py +0 -0
  38. ccproxy/auth/storage/__init__.py +12 -0
  39. ccproxy/auth/storage/base.py +57 -0
  40. ccproxy/auth/storage/json_file.py +159 -0
  41. ccproxy/auth/storage/keyring.py +192 -0
  42. ccproxy/claude_sdk/__init__.py +20 -0
  43. ccproxy/claude_sdk/client.py +169 -0
  44. ccproxy/claude_sdk/converter.py +331 -0
  45. ccproxy/claude_sdk/options.py +120 -0
  46. ccproxy/cli/__init__.py +14 -0
  47. ccproxy/cli/commands/__init__.py +8 -0
  48. ccproxy/cli/commands/auth.py +553 -0
  49. ccproxy/cli/commands/config/__init__.py +14 -0
  50. ccproxy/cli/commands/config/commands.py +766 -0
  51. ccproxy/cli/commands/config/schema_commands.py +119 -0
  52. ccproxy/cli/commands/serve.py +630 -0
  53. ccproxy/cli/docker/__init__.py +34 -0
  54. ccproxy/cli/docker/adapter_factory.py +157 -0
  55. ccproxy/cli/docker/params.py +278 -0
  56. ccproxy/cli/helpers.py +144 -0
  57. ccproxy/cli/main.py +193 -0
  58. ccproxy/cli/options/__init__.py +14 -0
  59. ccproxy/cli/options/claude_options.py +216 -0
  60. ccproxy/cli/options/core_options.py +40 -0
  61. ccproxy/cli/options/security_options.py +48 -0
  62. ccproxy/cli/options/server_options.py +117 -0
  63. ccproxy/config/__init__.py +40 -0
  64. ccproxy/config/auth.py +154 -0
  65. ccproxy/config/claude.py +124 -0
  66. ccproxy/config/cors.py +79 -0
  67. ccproxy/config/discovery.py +87 -0
  68. ccproxy/config/docker_settings.py +265 -0
  69. ccproxy/config/loader.py +108 -0
  70. ccproxy/config/observability.py +158 -0
  71. ccproxy/config/pricing.py +88 -0
  72. ccproxy/config/reverse_proxy.py +31 -0
  73. ccproxy/config/scheduler.py +89 -0
  74. ccproxy/config/security.py +14 -0
  75. ccproxy/config/server.py +81 -0
  76. ccproxy/config/settings.py +534 -0
  77. ccproxy/config/validators.py +231 -0
  78. ccproxy/core/__init__.py +274 -0
  79. ccproxy/core/async_utils.py +675 -0
  80. ccproxy/core/constants.py +97 -0
  81. ccproxy/core/errors.py +256 -0
  82. ccproxy/core/http.py +328 -0
  83. ccproxy/core/http_transformers.py +428 -0
  84. ccproxy/core/interfaces.py +247 -0
  85. ccproxy/core/logging.py +189 -0
  86. ccproxy/core/middleware.py +114 -0
  87. ccproxy/core/proxy.py +143 -0
  88. ccproxy/core/system.py +38 -0
  89. ccproxy/core/transformers.py +259 -0
  90. ccproxy/core/types.py +129 -0
  91. ccproxy/core/validators.py +288 -0
  92. ccproxy/docker/__init__.py +67 -0
  93. ccproxy/docker/adapter.py +588 -0
  94. ccproxy/docker/docker_path.py +207 -0
  95. ccproxy/docker/middleware.py +103 -0
  96. ccproxy/docker/models.py +228 -0
  97. ccproxy/docker/protocol.py +192 -0
  98. ccproxy/docker/stream_process.py +264 -0
  99. ccproxy/docker/validators.py +173 -0
  100. ccproxy/models/__init__.py +123 -0
  101. ccproxy/models/errors.py +42 -0
  102. ccproxy/models/messages.py +243 -0
  103. ccproxy/models/requests.py +85 -0
  104. ccproxy/models/responses.py +227 -0
  105. ccproxy/models/types.py +102 -0
  106. ccproxy/observability/__init__.py +51 -0
  107. ccproxy/observability/access_logger.py +400 -0
  108. ccproxy/observability/context.py +447 -0
  109. ccproxy/observability/metrics.py +539 -0
  110. ccproxy/observability/pushgateway.py +366 -0
  111. ccproxy/observability/sse_events.py +303 -0
  112. ccproxy/observability/stats_printer.py +755 -0
  113. ccproxy/observability/storage/__init__.py +1 -0
  114. ccproxy/observability/storage/duckdb_simple.py +665 -0
  115. ccproxy/observability/storage/models.py +55 -0
  116. ccproxy/pricing/__init__.py +19 -0
  117. ccproxy/pricing/cache.py +212 -0
  118. ccproxy/pricing/loader.py +267 -0
  119. ccproxy/pricing/models.py +106 -0
  120. ccproxy/pricing/updater.py +309 -0
  121. ccproxy/scheduler/__init__.py +39 -0
  122. ccproxy/scheduler/core.py +335 -0
  123. ccproxy/scheduler/exceptions.py +34 -0
  124. ccproxy/scheduler/manager.py +186 -0
  125. ccproxy/scheduler/registry.py +150 -0
  126. ccproxy/scheduler/tasks.py +484 -0
  127. ccproxy/services/__init__.py +10 -0
  128. ccproxy/services/claude_sdk_service.py +614 -0
  129. ccproxy/services/credentials/__init__.py +55 -0
  130. ccproxy/services/credentials/config.py +105 -0
  131. ccproxy/services/credentials/manager.py +562 -0
  132. ccproxy/services/credentials/oauth_client.py +482 -0
  133. ccproxy/services/proxy_service.py +1536 -0
  134. ccproxy/static/.keep +0 -0
  135. ccproxy/testing/__init__.py +34 -0
  136. ccproxy/testing/config.py +148 -0
  137. ccproxy/testing/content_generation.py +197 -0
  138. ccproxy/testing/mock_responses.py +262 -0
  139. ccproxy/testing/response_handlers.py +161 -0
  140. ccproxy/testing/scenarios.py +241 -0
  141. ccproxy/utils/__init__.py +6 -0
  142. ccproxy/utils/cost_calculator.py +210 -0
  143. ccproxy/utils/streaming_metrics.py +199 -0
  144. ccproxy_api-0.1.0.dist-info/METADATA +253 -0
  145. ccproxy_api-0.1.0.dist-info/RECORD +148 -0
  146. ccproxy_api-0.1.0.dist-info/WHEEL +4 -0
  147. ccproxy_api-0.1.0.dist-info/entry_points.txt +2 -0
  148. ccproxy_api-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,915 @@
1
+ """OpenAI API adapter implementation.
2
+
3
+ This module provides the OpenAI adapter that implements the APIAdapter interface
4
+ for converting between OpenAI and Anthropic API formats.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import json
10
+ import re
11
+ import time
12
+ import uuid
13
+ from collections.abc import AsyncIterator
14
+ from inspect import signature
15
+ from typing import Any, Literal, cast
16
+
17
+ import structlog
18
+
19
+ from ccproxy.core.interfaces import APIAdapter
20
+
21
+ from .models import (
22
+ OpenAIChatCompletionRequest,
23
+ OpenAIChatCompletionResponse,
24
+ OpenAIChoice,
25
+ OpenAIResponseMessage,
26
+ OpenAIUsage,
27
+ format_openai_tool_call,
28
+ generate_openai_response_id,
29
+ generate_openai_system_fingerprint,
30
+ )
31
+ from .streaming import OpenAIStreamProcessor
32
+
33
+
34
+ logger = structlog.get_logger(__name__)
35
+
36
+
37
# Model mapping from OpenAI to Claude
OPENAI_TO_CLAUDE_MODEL_MAPPING: dict[str, str] = {
    # GPT-4 models -> Claude 3.5 Sonnet (most comparable)
    "gpt-4": "claude-3-5-sonnet-20241022",
    "gpt-4-turbo": "claude-3-5-sonnet-20241022",
    "gpt-4-turbo-preview": "claude-3-5-sonnet-20241022",
    "gpt-4-1106-preview": "claude-3-5-sonnet-20241022",
    "gpt-4-0125-preview": "claude-3-5-sonnet-20241022",
    "gpt-4-turbo-2024-04-09": "claude-3-5-sonnet-20241022",
    "gpt-4o": "claude-3-7-sonnet-20250219",
    "gpt-4o-2024-05-13": "claude-3-7-sonnet-20250219",
    "gpt-4o-2024-08-06": "claude-3-7-sonnet-20250219",
    "gpt-4o-2024-11-20": "claude-3-7-sonnet-20250219",
    "gpt-4o-mini": "claude-3-5-haiku-latest",
    "gpt-4o-mini-2024-07-18": "claude-3-5-haiku-latest",
    # o1 models -> Claude models that support thinking
    "o1": "claude-opus-4-20250514",
    "o1-preview": "claude-opus-4-20250514",
    "o1-mini": "claude-sonnet-4-20250514",
    # o3 models -> Claude Opus 4
    "o3-mini": "claude-opus-4-20250514",
    # GPT-3.5 models -> Claude 3.5 Haiku (faster, cheaper)
    "gpt-3.5-turbo": "claude-3-5-haiku-20241022",
    "gpt-3.5-turbo-16k": "claude-3-5-haiku-20241022",
    "gpt-3.5-turbo-1106": "claude-3-5-haiku-20241022",
    "gpt-3.5-turbo-0125": "claude-3-5-haiku-20241022",
    # Generic fallback
    "text-davinci-003": "claude-3-5-sonnet-20241022",
    "text-davinci-002": "claude-3-5-sonnet-20241022",
}

# Prefix fallbacks for versioned/unknown OpenAI model names, checked in order
# (most specific first). Note "gpt-4" also matches every "gpt-4o*" name.
_MODEL_PREFIX_RULES: list[tuple[str, str]] = [
    ("gpt-4o-mini", "claude-3-5-haiku-latest"),
    ("gpt-4", "claude-3-7-sonnet-20250219"),
    ("gpt-3.5", "claude-3-5-haiku-latest"),
    ("o1", "claude-sonnet-4-20250514"),
    ("o3", "claude-opus-4-20250514"),
    ("gpt", "claude-sonnet-4-20250514"),
]


def map_openai_model_to_claude(openai_model: str) -> str:
    """Map OpenAI model name to Claude model name.

    Resolution order: exact table lookup, then ordered prefix rules, then
    pass-through (covers native Claude model names and unknown models).

    Args:
        openai_model: OpenAI model identifier

    Returns:
        Claude model identifier
    """
    exact = OPENAI_TO_CLAUDE_MODEL_MAPPING.get(openai_model)
    if exact:
        return exact

    for prefix, claude_model in _MODEL_PREFIX_RULES:
        if openai_model.startswith(prefix):
            return claude_model

    # Claude models and unknown models pass through unchanged; we may map
    # unknowns to a default model in the future.
    return openai_model
104
+
105
+
106
class OpenAIAdapter(APIAdapter):
    """OpenAI API adapter for converting between OpenAI and Anthropic formats.

    Implements the ``APIAdapter`` interface: requests are translated
    OpenAI -> Anthropic (``adapt_request``) and responses/streams are
    translated Anthropic -> OpenAI (``adapt_response`` / ``adapt_stream``).
    """

    def __init__(self) -> None:
        """Initialize the OpenAI adapter."""
        # The adapter holds no per-instance state; all conversion methods are
        # pure functions of their inputs.
        pass
112
+
113
+ def adapt_request(self, request: dict[str, Any]) -> dict[str, Any]:
114
+ """Convert OpenAI request format to Anthropic format.
115
+
116
+ Args:
117
+ request: OpenAI format request
118
+
119
+ Returns:
120
+ Anthropic format request
121
+
122
+ Raises:
123
+ ValueError: If the request format is invalid or unsupported
124
+ """
125
+ try:
126
+ # Parse OpenAI request
127
+ openai_req = OpenAIChatCompletionRequest(**request)
128
+ except Exception as e:
129
+ raise ValueError(f"Invalid OpenAI request format: {e}") from e
130
+
131
+ # Map OpenAI model to Claude model
132
+ model = map_openai_model_to_claude(openai_req.model)
133
+
134
+ # Convert messages
135
+ messages, system_prompt = self._convert_messages_to_anthropic(
136
+ openai_req.messages
137
+ )
138
+
139
+ # Build Anthropic request
140
+ anthropic_request = {
141
+ "model": model,
142
+ "messages": messages,
143
+ "max_tokens": openai_req.max_tokens or 4096,
144
+ }
145
+
146
+ # Add system prompt if present
147
+ if system_prompt:
148
+ anthropic_request["system"] = system_prompt
149
+
150
+ # Add optional parameters
151
+ if openai_req.temperature is not None:
152
+ anthropic_request["temperature"] = openai_req.temperature
153
+
154
+ if openai_req.top_p is not None:
155
+ anthropic_request["top_p"] = openai_req.top_p
156
+
157
+ if openai_req.stream is not None:
158
+ anthropic_request["stream"] = openai_req.stream
159
+
160
+ if openai_req.stop is not None:
161
+ if isinstance(openai_req.stop, str):
162
+ anthropic_request["stop_sequences"] = [openai_req.stop]
163
+ else:
164
+ anthropic_request["stop_sequences"] = openai_req.stop
165
+
166
+ # Handle metadata - combine user field and metadata
167
+ metadata = {}
168
+ if openai_req.user:
169
+ metadata["user_id"] = openai_req.user
170
+ if openai_req.metadata:
171
+ metadata.update(openai_req.metadata)
172
+ if metadata:
173
+ anthropic_request["metadata"] = metadata
174
+
175
+ # Handle response format - add to system prompt for JSON mode
176
+ if openai_req.response_format:
177
+ format_type = (
178
+ openai_req.response_format.type if openai_req.response_format else None
179
+ )
180
+
181
+ if format_type == "json_object" and system_prompt is not None:
182
+ system_prompt += "\nYou must respond with valid JSON only."
183
+ anthropic_request["system"] = system_prompt
184
+ elif format_type == "json_schema" and system_prompt is not None:
185
+ # For JSON schema, we can add more specific instructions
186
+ if openai_req.response_format and hasattr(
187
+ openai_req.response_format, "json_schema"
188
+ ):
189
+ system_prompt += f"\nYou must respond with valid JSON that conforms to this schema: {openai_req.response_format.json_schema}"
190
+ anthropic_request["system"] = system_prompt
191
+
192
+ # Handle reasoning_effort (o1 models) -> thinking configuration
193
+ # Automatically enable thinking for o1 models even without explicit reasoning_effort
194
+ if (
195
+ openai_req.reasoning_effort
196
+ or openai_req.model.startswith("o1")
197
+ or openai_req.model.startswith("o3")
198
+ ):
199
+ # Map reasoning effort to thinking tokens
200
+ thinking_tokens_map = {
201
+ "low": 1000,
202
+ "medium": 5000,
203
+ "high": 10000,
204
+ }
205
+
206
+ # Default thinking tokens based on model if reasoning_effort not specified
207
+ default_thinking_tokens = 5000 # medium by default
208
+ if openai_req.model.startswith("o3"):
209
+ default_thinking_tokens = 10000 # high for o3 models
210
+ elif openai_req.model == "o1-mini":
211
+ default_thinking_tokens = 3000 # lower for mini model
212
+
213
+ thinking_tokens = (
214
+ thinking_tokens_map.get(
215
+ openai_req.reasoning_effort, default_thinking_tokens
216
+ )
217
+ if openai_req.reasoning_effort
218
+ else default_thinking_tokens
219
+ )
220
+
221
+ anthropic_request["thinking"] = {
222
+ "type": "enabled",
223
+ "budget_tokens": thinking_tokens,
224
+ }
225
+
226
+ # Ensure max_tokens is greater than budget_tokens
227
+ current_max_tokens = cast(int, anthropic_request.get("max_tokens", 4096))
228
+ if current_max_tokens <= thinking_tokens:
229
+ # Set max_tokens to be 2x thinking tokens + some buffer for response
230
+ anthropic_request["max_tokens"] = thinking_tokens + max(
231
+ thinking_tokens, 4096
232
+ )
233
+ logger.debug(
234
+ "max_tokens_adjusted_for_thinking",
235
+ original_max_tokens=current_max_tokens,
236
+ thinking_tokens=thinking_tokens,
237
+ new_max_tokens=anthropic_request["max_tokens"],
238
+ operation="adapt_request",
239
+ )
240
+
241
+ # When thinking is enabled, temperature must be 1.0
242
+ if (
243
+ anthropic_request.get("temperature") is not None
244
+ and anthropic_request["temperature"] != 1.0
245
+ ):
246
+ logger.debug(
247
+ "temperature_adjusted_for_thinking",
248
+ original_temperature=anthropic_request["temperature"],
249
+ new_temperature=1.0,
250
+ operation="adapt_request",
251
+ )
252
+ anthropic_request["temperature"] = 1.0
253
+ elif "temperature" not in anthropic_request:
254
+ # Set default temperature to 1.0 for thinking mode
255
+ anthropic_request["temperature"] = 1.0
256
+
257
+ logger.debug(
258
+ "thinking_enabled",
259
+ reasoning_effort=openai_req.reasoning_effort,
260
+ model=openai_req.model,
261
+ thinking_tokens=thinking_tokens,
262
+ temperature=anthropic_request["temperature"],
263
+ operation="adapt_request",
264
+ )
265
+
266
+ # Note: seed, logprobs, top_logprobs, and store don't have direct Anthropic equivalents
267
+ if openai_req.seed is not None:
268
+ logger.debug(
269
+ "unsupported_parameter_ignored",
270
+ parameter="seed",
271
+ value=openai_req.seed,
272
+ operation="adapt_request",
273
+ )
274
+ if openai_req.logprobs or openai_req.top_logprobs:
275
+ logger.debug(
276
+ "unsupported_parameters_ignored",
277
+ parameters=["logprobs", "top_logprobs"],
278
+ logprobs=openai_req.logprobs,
279
+ top_logprobs=openai_req.top_logprobs,
280
+ operation="adapt_request",
281
+ )
282
+ if openai_req.store:
283
+ logger.debug(
284
+ "unsupported_parameter_ignored",
285
+ parameter="store",
286
+ value=openai_req.store,
287
+ operation="adapt_request",
288
+ )
289
+
290
+ # Handle tools/functions
291
+ if openai_req.tools:
292
+ anthropic_request["tools"] = self._convert_tools_to_anthropic(
293
+ openai_req.tools
294
+ )
295
+ elif openai_req.functions:
296
+ # Convert deprecated functions to tools
297
+ anthropic_request["tools"] = self._convert_functions_to_anthropic(
298
+ openai_req.functions
299
+ )
300
+
301
+ if openai_req.tool_choice:
302
+ # Convert tool choice - can be string or OpenAIToolChoice object
303
+ if isinstance(openai_req.tool_choice, str):
304
+ anthropic_request["tool_choice"] = (
305
+ self._convert_tool_choice_to_anthropic(openai_req.tool_choice)
306
+ )
307
+ else:
308
+ # Convert OpenAIToolChoice object to dict
309
+ tool_choice_dict = {
310
+ "type": openai_req.tool_choice.type,
311
+ "function": openai_req.tool_choice.function,
312
+ }
313
+ anthropic_request["tool_choice"] = (
314
+ self._convert_tool_choice_to_anthropic(tool_choice_dict)
315
+ )
316
+ elif openai_req.function_call:
317
+ # Convert deprecated function_call to tool_choice
318
+ anthropic_request["tool_choice"] = self._convert_function_call_to_anthropic(
319
+ openai_req.function_call
320
+ )
321
+
322
+ logger.debug(
323
+ "format_conversion_completed",
324
+ from_format="openai",
325
+ to_format="anthropic",
326
+ original_model=openai_req.model,
327
+ anthropic_model=anthropic_request.get("model"),
328
+ has_tools=bool(anthropic_request.get("tools")),
329
+ has_system=bool(anthropic_request.get("system")),
330
+ message_count=len(cast(list[Any], anthropic_request["messages"])),
331
+ operation="adapt_request",
332
+ )
333
+ return anthropic_request
334
+
335
    def adapt_response(self, response: dict[str, Any]) -> dict[str, Any]:
        """Convert Anthropic response format to OpenAI format.

        Args:
            response: Anthropic format response

        Returns:
            OpenAI format response (``chat.completion`` shape, as a dict)

        Raises:
            ValueError: If the response format is invalid or unsupported
        """
        try:
            # Extract original model from response metadata if available;
            # falls back to "gpt-4" when the upstream response omits it.
            original_model = response.get("model", "gpt-4")

            # Generate a fresh OpenAI-style response ID.
            request_id = generate_openai_response_id()

            # Flatten Anthropic content blocks into one string plus a list of
            # OpenAI tool calls.
            content = ""
            tool_calls = []

            if "content" in response and response["content"]:
                for block in response["content"]:
                    if block.get("type") == "text":
                        content += block.get("text", "")
                    elif block.get("type") == "thinking":
                        # Handle thinking blocks - we can include them with a marker.
                        # The marker is parsed back to a thinking block by
                        # _convert_content_to_anthropic on the request side.
                        thinking_text = block.get("thinking", "")
                        # NOTE(review): this local shadows the module-level
                        # `inspect.signature` import; harmless here, but worth
                        # renaming.
                        signature = block.get("signature")
                        if thinking_text:
                            content += f'<thinking signature="{signature}">{thinking_text}</thinking>'
                    elif block.get("type") == "tool_use":
                        tool_calls.append(format_openai_tool_call(block))
                    else:
                        # Unknown block types are dropped (logged, not raised).
                        logger.warning(
                            "unsupported_content_block_type", type=block.get("type")
                        )

            # Create OpenAI message
            # When there are tool calls but no content, use empty string instead of None
            # Otherwise, if content is empty string, convert to None
            final_content: str | None = content
            if tool_calls and not content:
                final_content = ""
            elif content == "":
                final_content = None

            message = OpenAIResponseMessage(
                role="assistant",
                content=final_content,
                tool_calls=tool_calls if tool_calls else None,
            )

            # Map stop reason
            finish_reason = self._convert_stop_reason_to_openai(
                response.get("stop_reason")
            )

            # Ensure finish_reason is a valid literal type
            if finish_reason not in ["stop", "length", "tool_calls", "content_filter"]:
                finish_reason = "stop"

            # Cast to proper literal type
            valid_finish_reason = cast(
                Literal["stop", "length", "tool_calls", "content_filter"], finish_reason
            )

            # Create choice
            choice = OpenAIChoice(
                index=0,
                message=message,
                finish_reason=valid_finish_reason,
                logprobs=None,  # Anthropic doesn't support logprobs
            )

            # Create usage: Anthropic input/output tokens map onto OpenAI
            # prompt/completion tokens; total is their sum.
            usage_info = response.get("usage", {})
            usage = OpenAIUsage(
                prompt_tokens=usage_info.get("input_tokens", 0),
                completion_tokens=usage_info.get("output_tokens", 0),
                total_tokens=usage_info.get("input_tokens", 0)
                + usage_info.get("output_tokens", 0),
            )

            # Create OpenAI response
            openai_response = OpenAIChatCompletionResponse(
                id=request_id,
                object="chat.completion",
                created=int(time.time()),
                model=original_model,
                choices=[choice],
                usage=usage,
                system_fingerprint=generate_openai_system_fingerprint(),
            )

            logger.debug(
                "format_conversion_completed",
                from_format="anthropic",
                to_format="openai",
                response_id=request_id,
                original_model=original_model,
                finish_reason=valid_finish_reason,
                content_length=len(content) if content else 0,
                tool_calls_count=len(tool_calls),
                input_tokens=usage_info.get("input_tokens", 0),
                output_tokens=usage_info.get("output_tokens", 0),
                operation="adapt_response",
                choice=choice,
            )
            return openai_response.model_dump()

        except Exception as e:
            # Any conversion failure is surfaced uniformly as ValueError.
            raise ValueError(f"Invalid Anthropic response format: {e}") from e
450
+
451
+ async def adapt_stream(
452
+ self, stream: AsyncIterator[dict[str, Any]]
453
+ ) -> AsyncIterator[dict[str, Any]]:
454
+ """Convert Anthropic streaming response to OpenAI streaming format.
455
+
456
+ Args:
457
+ stream: Anthropic streaming response
458
+
459
+ Yields:
460
+ OpenAI format streaming chunks
461
+
462
+ Raises:
463
+ ValueError: If the stream format is invalid or unsupported
464
+ """
465
+ # Create stream processor
466
+ processor = OpenAIStreamProcessor(
467
+ enable_usage=True,
468
+ enable_tool_calls=True,
469
+ enable_text_chunking=False, # Keep text as-is for compatibility
470
+ )
471
+
472
+ try:
473
+ # Process the stream and parse SSE format back to dict objects
474
+ async for sse_chunk in processor.process_stream(stream):
475
+ if sse_chunk.startswith("data: "):
476
+ data_str = sse_chunk[6:].strip()
477
+ if data_str and data_str != "[DONE]":
478
+ try:
479
+ yield json.loads(data_str)
480
+ except json.JSONDecodeError:
481
+ logger.warning(
482
+ "streaming_chunk_parse_failed",
483
+ chunk_data=data_str[:100] + "..."
484
+ if len(data_str) > 100
485
+ else data_str,
486
+ operation="adapt_stream",
487
+ )
488
+ continue
489
+ except Exception as e:
490
+ raise ValueError(f"Error processing streaming response: {e}") from e
491
+
492
    def _convert_messages_to_anthropic(
        self, openai_messages: list[Any]
    ) -> tuple[list[dict[str, Any]], str | None]:
        """Convert OpenAI messages to Anthropic format.

        Args:
            openai_messages: OpenAI chat messages (objects exposing ``role``
                and ``content`` attributes; assistant messages may carry
                ``tool_calls`` and tool messages ``tool_call_id`` — assumed
                pydantic models, confirm against the request model).

        Returns:
            Tuple of (Anthropic messages, combined system prompt or ``None``).
        """
        messages = []
        system_prompt = None

        for msg in openai_messages:
            if msg.role in ["system", "developer"]:
                # System and developer messages become system prompt;
                # multiple ones are joined with newlines.
                if isinstance(msg.content, str):
                    if system_prompt:
                        system_prompt += "\n" + msg.content
                    else:
                        system_prompt = msg.content
                elif isinstance(msg.content, list):
                    # Extract text from content blocks
                    text_parts: list[str] = []
                    for block in msg.content:
                        if (
                            hasattr(block, "type")
                            and block.type == "text"
                            and hasattr(block, "text")
                            and block.text
                        ):
                            text_parts.append(block.text)
                    text_content = " ".join(text_parts)
                    if system_prompt:
                        system_prompt += "\n" + text_content
                    else:
                        system_prompt = text_content

            elif msg.role in ["user", "assistant"]:
                # Convert user/assistant messages
                anthropic_msg = {
                    "role": msg.role,
                    "content": self._convert_content_to_anthropic(msg.content),
                }

                # Add tool calls if present
                if hasattr(msg, "tool_calls") and msg.tool_calls:
                    # Ensure content is a list so tool_use blocks can be
                    # appended alongside the text.
                    if isinstance(anthropic_msg["content"], str):
                        anthropic_msg["content"] = [
                            {"type": "text", "text": anthropic_msg["content"]}
                        ]
                    if not isinstance(anthropic_msg["content"], list):
                        anthropic_msg["content"] = []

                    # Content is now guaranteed to be a list
                    content_list = anthropic_msg["content"]
                    for tool_call in msg.tool_calls:
                        content_list.append(
                            self._convert_tool_call_to_anthropic(tool_call)
                        )

                messages.append(anthropic_msg)

            elif msg.role == "tool":
                # Tool result messages: Anthropic expects tool_result blocks
                # inside a *user* turn.
                if messages and messages[-1]["role"] == "user":
                    # Add to previous user message
                    if isinstance(messages[-1]["content"], str):
                        messages[-1]["content"] = [
                            {"type": "text", "text": messages[-1]["content"]}
                        ]

                    tool_result = {
                        "type": "tool_result",
                        "tool_use_id": getattr(msg, "tool_call_id", "unknown")
                        or "unknown",
                        "content": msg.content or "",
                    }
                    if isinstance(messages[-1]["content"], list):
                        messages[-1]["content"].append(tool_result)
                else:
                    # Create new user message with tool result
                    tool_result = {
                        "type": "tool_result",
                        "tool_use_id": getattr(msg, "tool_call_id", "unknown")
                        or "unknown",
                        "content": msg.content or "",
                    }
                    messages.append(
                        {
                            "role": "user",
                            "content": [tool_result],
                        }
                    )

        return messages, system_prompt
583
+
584
    def _convert_content_to_anthropic(
        self, content: str | list[Any] | None
    ) -> str | list[dict[str, Any]]:
        """Convert OpenAI content to Anthropic format.

        Accepts a plain string (optionally containing the inline
        ``<thinking signature="...">...</thinking>`` markers produced by
        ``adapt_response``), a list of content blocks (pydantic objects or
        dicts), or ``None``.

        Returns:
            Either a plain string or a list of Anthropic content blocks.
        """
        if content is None:
            return ""

        if isinstance(content, str):
            # Check if the string contains thinking blocks
            thinking_pattern = r'<thinking signature="([^"]*)">(.*?)</thinking>'
            matches = re.findall(thinking_pattern, content, re.DOTALL)

            if matches:
                # Convert string with thinking blocks to list format
                anthropic_content: list[dict[str, Any]] = []
                last_end = 0

                for match in re.finditer(thinking_pattern, content, re.DOTALL):
                    # Add any text before the thinking block
                    if match.start() > last_end:
                        text_before = content[last_end : match.start()].strip()
                        if text_before:
                            anthropic_content.append(
                                {"type": "text", "text": text_before}
                            )

                    # Add the thinking block
                    signature = match.group(1)
                    thinking_text = match.group(2)
                    thinking_block: dict[str, Any] = {
                        "type": "thinking",
                        "thinking": thinking_text,  # Changed from "text" to "thinking"
                    }
                    # "None" is the stringified marker for a missing signature.
                    if signature and signature != "None":
                        thinking_block["signature"] = signature
                    anthropic_content.append(thinking_block)

                    last_end = match.end()

                # Add any remaining text after the last thinking block
                if last_end < len(content):
                    remaining_text = content[last_end:].strip()
                    if remaining_text:
                        anthropic_content.append(
                            {"type": "text", "text": remaining_text}
                        )

                return anthropic_content
            else:
                return content

        # content must be a list at this point
        anthropic_content = []
        for block in content:
            # Handle both Pydantic objects and dicts
            # NOTE(review): the dict branch below duplicates the pydantic
            # branch's image handling; candidate for a shared helper.
            if hasattr(block, "type"):
                # This is a Pydantic object
                block_type = getattr(block, "type", None)
                if (
                    block_type == "text"
                    and hasattr(block, "text")
                    and block.text is not None
                ):
                    anthropic_content.append(
                        {
                            "type": "text",
                            "text": block.text,
                        }
                    )
                elif (
                    block_type == "image_url"
                    and hasattr(block, "image_url")
                    and block.image_url is not None
                ):
                    # Get URL from image_url
                    if hasattr(block.image_url, "url"):
                        url = block.image_url.url
                    elif isinstance(block.image_url, dict):
                        url = block.image_url.get("url", "")
                    else:
                        url = ""

                    if url.startswith("data:"):
                        # Base64 encoded image
                        try:
                            media_type, data = url.split(";base64,")
                            media_type = media_type.split(":")[1]
                            anthropic_content.append(
                                {
                                    "type": "image",
                                    "source": {
                                        "type": "base64",
                                        "media_type": media_type,
                                        "data": data,
                                    },
                                }
                            )
                        except ValueError:
                            # Malformed data URL: drop the block and log.
                            logger.warning(
                                "invalid_base64_image_url",
                                url=url[:100] + "..." if len(url) > 100 else url,
                                operation="convert_content_to_anthropic",
                            )
                    else:
                        # URL-based image (not directly supported by Anthropic)
                        anthropic_content.append(
                            {
                                "type": "text",
                                "text": f"[Image: {url}]",
                            }
                        )
            elif isinstance(block, dict):
                if block.get("type") == "text":
                    anthropic_content.append(
                        {
                            "type": "text",
                            "text": block.get("text", ""),
                        }
                    )
                elif block.get("type") == "image_url":
                    # Convert image URL to Anthropic format
                    image_url = block.get("image_url", {})
                    url = image_url.get("url", "")

                    if url.startswith("data:"):
                        # Base64 encoded image
                        try:
                            media_type, data = url.split(";base64,")
                            media_type = media_type.split(":")[1]
                            anthropic_content.append(
                                {
                                    "type": "image",
                                    "source": {
                                        "type": "base64",
                                        "media_type": media_type,
                                        "data": data,
                                    },
                                }
                            )
                        except ValueError:
                            logger.warning(
                                "invalid_base64_image_url",
                                url=url[:100] + "..." if len(url) > 100 else url,
                                operation="convert_content_to_anthropic",
                            )
                    else:
                        # URL-based image (not directly supported by Anthropic)
                        anthropic_content.append(
                            {
                                "type": "text",
                                "text": f"[Image: {url}]",
                            }
                        )

        # Fall back to "" when no convertible block was found.
        return anthropic_content if anthropic_content else ""
739
+
740
+ def _convert_tools_to_anthropic(
741
+ self, tools: list[dict[str, Any]] | list[Any]
742
+ ) -> list[dict[str, Any]]:
743
+ """Convert OpenAI tools to Anthropic format."""
744
+ anthropic_tools = []
745
+
746
+ for tool in tools:
747
+ # Handle both dict and Pydantic model cases
748
+ if isinstance(tool, dict):
749
+ if tool.get("type") == "function":
750
+ func = tool.get("function", {})
751
+ anthropic_tools.append(
752
+ {
753
+ "name": func.get("name", ""),
754
+ "description": func.get("description", ""),
755
+ "input_schema": func.get("parameters", {}),
756
+ }
757
+ )
758
+ elif hasattr(tool, "type") and tool.type == "function":
759
+ # Handle Pydantic OpenAITool model
760
+ anthropic_tools.append(
761
+ {
762
+ "name": tool.function.name,
763
+ "description": tool.function.description or "",
764
+ "input_schema": tool.function.parameters,
765
+ }
766
+ )
767
+
768
+ return anthropic_tools
769
+
770
+ def _convert_functions_to_anthropic(
771
+ self, functions: list[dict[str, Any]]
772
+ ) -> list[dict[str, Any]]:
773
+ """Convert OpenAI functions to Anthropic tools format."""
774
+ anthropic_tools = []
775
+
776
+ for func in functions:
777
+ anthropic_tools.append(
778
+ {
779
+ "name": func.get("name", ""),
780
+ "description": func.get("description", ""),
781
+ "input_schema": func.get("parameters", {}),
782
+ }
783
+ )
784
+
785
+ return anthropic_tools
786
+
787
+ def _convert_tool_choice_to_anthropic(
788
+ self, tool_choice: str | dict[str, Any]
789
+ ) -> dict[str, Any]:
790
+ """Convert OpenAI tool_choice to Anthropic format."""
791
+ if isinstance(tool_choice, str):
792
+ mapping = {
793
+ "none": {"type": "none"},
794
+ "auto": {"type": "auto"},
795
+ "required": {"type": "any"},
796
+ }
797
+ return mapping.get(tool_choice, {"type": "auto"})
798
+
799
+ elif isinstance(tool_choice, dict) and tool_choice.get("type") == "function":
800
+ func = tool_choice.get("function", {})
801
+ return {
802
+ "type": "tool",
803
+ "name": func.get("name", ""),
804
+ }
805
+
806
+ return {"type": "auto"}
807
+
808
+ def _convert_function_call_to_anthropic(
809
+ self, function_call: str | dict[str, Any]
810
+ ) -> dict[str, Any]:
811
+ """Convert OpenAI function_call to Anthropic tool_choice format."""
812
+ if isinstance(function_call, str):
813
+ if function_call == "none":
814
+ return {"type": "none"}
815
+ elif function_call == "auto":
816
+ return {"type": "auto"}
817
+
818
+ elif isinstance(function_call, dict):
819
+ return {
820
+ "type": "tool",
821
+ "name": function_call.get("name", ""),
822
+ }
823
+
824
+ return {"type": "auto"}
825
+
826
+ def _convert_tool_call_to_anthropic(
827
+ self, tool_call: dict[str, Any]
828
+ ) -> dict[str, Any]:
829
+ """Convert OpenAI tool call to Anthropic format."""
830
+ func = tool_call.get("function", {})
831
+
832
+ # Parse arguments string to dict for Anthropic format
833
+ arguments_str = func.get("arguments", "{}")
834
+ try:
835
+ if isinstance(arguments_str, str):
836
+ input_dict = json.loads(arguments_str)
837
+ else:
838
+ input_dict = arguments_str # Already a dict
839
+ except json.JSONDecodeError:
840
+ logger.warning(
841
+ "tool_arguments_parse_failed",
842
+ arguments=arguments_str[:200] + "..."
843
+ if len(str(arguments_str)) > 200
844
+ else str(arguments_str),
845
+ operation="convert_tool_call_to_anthropic",
846
+ )
847
+ input_dict = {}
848
+
849
+ return {
850
+ "type": "tool_use",
851
+ "id": tool_call.get("id", ""),
852
+ "name": func.get("name", ""),
853
+ "input": input_dict,
854
+ }
855
+
856
+ def _convert_stop_reason_to_openai(self, stop_reason: str | None) -> str | None:
857
+ """Convert Anthropic stop reason to OpenAI format."""
858
+ if stop_reason is None:
859
+ return None
860
+
861
+ mapping = {
862
+ "end_turn": "stop",
863
+ "max_tokens": "length",
864
+ "stop_sequence": "stop",
865
+ "tool_use": "tool_calls",
866
+ "pause_turn": "stop",
867
+ "refusal": "content_filter",
868
+ }
869
+
870
+ return mapping.get(stop_reason, "stop")
871
+
872
+ def adapt_error(self, error_body: dict[str, Any]) -> dict[str, Any]:
873
+ """Convert Anthropic error format to OpenAI error format.
874
+
875
+ Args:
876
+ error_body: Anthropic error response
877
+
878
+ Returns:
879
+ OpenAI-formatted error response
880
+ """
881
+ # Extract error details from Anthropic format
882
+ anthropic_error = error_body.get("error", {})
883
+ error_type = anthropic_error.get("type", "internal_server_error")
884
+ error_message = anthropic_error.get("message", "An error occurred")
885
+
886
+ # Map Anthropic error types to OpenAI error types
887
+ error_type_mapping = {
888
+ "invalid_request_error": "invalid_request_error",
889
+ "authentication_error": "invalid_request_error",
890
+ "permission_error": "invalid_request_error",
891
+ "not_found_error": "invalid_request_error",
892
+ "rate_limit_error": "rate_limit_error",
893
+ "internal_server_error": "internal_server_error",
894
+ "overloaded_error": "server_error",
895
+ }
896
+
897
+ openai_error_type = error_type_mapping.get(error_type, "invalid_request_error")
898
+
899
+ # Return OpenAI-formatted error
900
+ return {
901
+ "error": {
902
+ "message": error_message,
903
+ "type": openai_error_type,
904
+ "code": error_type, # Preserve original error type as code
905
+ }
906
+ }
907
+
908
+
909
# Public API of this module; the listed names are defined or imported
# earlier in the file (adapter class, request/response models, and the
# OpenAI-to-Claude model-mapping helpers).
__all__ = [
    "OpenAIAdapter",
    "OpenAIChatCompletionRequest",
    "OpenAIChatCompletionResponse",
    "map_openai_model_to_claude",
    "OPENAI_TO_CLAUDE_MODEL_MAPPING",
]