ccproxy-api 0.1.6__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/api/__init__.py +1 -15
- ccproxy/api/app.py +439 -212
- ccproxy/api/bootstrap.py +30 -0
- ccproxy/api/decorators.py +85 -0
- ccproxy/api/dependencies.py +145 -176
- ccproxy/api/format_validation.py +54 -0
- ccproxy/api/middleware/cors.py +6 -3
- ccproxy/api/middleware/errors.py +402 -530
- ccproxy/api/middleware/hooks.py +563 -0
- ccproxy/api/middleware/normalize_headers.py +59 -0
- ccproxy/api/middleware/request_id.py +35 -16
- ccproxy/api/middleware/streaming_hooks.py +292 -0
- ccproxy/api/routes/__init__.py +5 -14
- ccproxy/api/routes/health.py +39 -672
- ccproxy/api/routes/plugins.py +277 -0
- ccproxy/auth/__init__.py +2 -19
- ccproxy/auth/bearer.py +25 -15
- ccproxy/auth/dependencies.py +123 -157
- ccproxy/auth/exceptions.py +0 -12
- ccproxy/auth/manager.py +35 -49
- ccproxy/auth/managers/__init__.py +10 -0
- ccproxy/auth/managers/base.py +523 -0
- ccproxy/auth/managers/base_enhanced.py +63 -0
- ccproxy/auth/managers/token_snapshot.py +77 -0
- ccproxy/auth/models/base.py +65 -0
- ccproxy/auth/models/credentials.py +40 -0
- ccproxy/auth/oauth/__init__.py +4 -18
- ccproxy/auth/oauth/base.py +533 -0
- ccproxy/auth/oauth/cli_errors.py +37 -0
- ccproxy/auth/oauth/flows.py +430 -0
- ccproxy/auth/oauth/protocol.py +366 -0
- ccproxy/auth/oauth/registry.py +408 -0
- ccproxy/auth/oauth/router.py +396 -0
- ccproxy/auth/oauth/routes.py +186 -113
- ccproxy/auth/oauth/session.py +151 -0
- ccproxy/auth/oauth/templates.py +342 -0
- ccproxy/auth/storage/__init__.py +2 -5
- ccproxy/auth/storage/base.py +279 -5
- ccproxy/auth/storage/generic.py +134 -0
- ccproxy/cli/__init__.py +1 -2
- ccproxy/cli/_settings_help.py +351 -0
- ccproxy/cli/commands/auth.py +1519 -793
- ccproxy/cli/commands/config/commands.py +209 -276
- ccproxy/cli/commands/plugins.py +669 -0
- ccproxy/cli/commands/serve.py +75 -810
- ccproxy/cli/commands/status.py +254 -0
- ccproxy/cli/decorators.py +83 -0
- ccproxy/cli/helpers.py +22 -60
- ccproxy/cli/main.py +359 -10
- ccproxy/cli/options/claude_options.py +0 -25
- ccproxy/config/__init__.py +7 -11
- ccproxy/config/core.py +227 -0
- ccproxy/config/env_generator.py +232 -0
- ccproxy/config/runtime.py +67 -0
- ccproxy/config/security.py +36 -3
- ccproxy/config/settings.py +382 -441
- ccproxy/config/toml_generator.py +299 -0
- ccproxy/config/utils.py +452 -0
- ccproxy/core/__init__.py +7 -271
- ccproxy/{_version.py → core/_version.py} +16 -3
- ccproxy/core/async_task_manager.py +516 -0
- ccproxy/core/async_utils.py +47 -14
- ccproxy/core/auth/__init__.py +6 -0
- ccproxy/core/constants.py +16 -50
- ccproxy/core/errors.py +53 -0
- ccproxy/core/id_utils.py +20 -0
- ccproxy/core/interfaces.py +16 -123
- ccproxy/core/logging.py +473 -18
- ccproxy/core/plugins/__init__.py +77 -0
- ccproxy/core/plugins/cli_discovery.py +211 -0
- ccproxy/core/plugins/declaration.py +455 -0
- ccproxy/core/plugins/discovery.py +604 -0
- ccproxy/core/plugins/factories.py +967 -0
- ccproxy/core/plugins/hooks/__init__.py +30 -0
- ccproxy/core/plugins/hooks/base.py +58 -0
- ccproxy/core/plugins/hooks/events.py +46 -0
- ccproxy/core/plugins/hooks/implementations/__init__.py +16 -0
- ccproxy/core/plugins/hooks/implementations/formatters/__init__.py +11 -0
- ccproxy/core/plugins/hooks/implementations/formatters/json.py +552 -0
- ccproxy/core/plugins/hooks/implementations/formatters/raw.py +370 -0
- ccproxy/core/plugins/hooks/implementations/http_tracer.py +431 -0
- ccproxy/core/plugins/hooks/layers.py +44 -0
- ccproxy/core/plugins/hooks/manager.py +186 -0
- ccproxy/core/plugins/hooks/registry.py +139 -0
- ccproxy/core/plugins/hooks/thread_manager.py +203 -0
- ccproxy/core/plugins/hooks/types.py +22 -0
- ccproxy/core/plugins/interfaces.py +416 -0
- ccproxy/core/plugins/loader.py +166 -0
- ccproxy/core/plugins/middleware.py +233 -0
- ccproxy/core/plugins/models.py +59 -0
- ccproxy/core/plugins/protocol.py +180 -0
- ccproxy/core/plugins/runtime.py +519 -0
- ccproxy/{observability/context.py → core/request_context.py} +137 -94
- ccproxy/core/status_report.py +211 -0
- ccproxy/core/transformers.py +13 -8
- ccproxy/data/claude_headers_fallback.json +558 -0
- ccproxy/data/codex_headers_fallback.json +121 -0
- ccproxy/http/__init__.py +30 -0
- ccproxy/http/base.py +95 -0
- ccproxy/http/client.py +323 -0
- ccproxy/http/hooks.py +642 -0
- ccproxy/http/pool.py +279 -0
- ccproxy/llms/formatters/__init__.py +7 -0
- ccproxy/llms/formatters/anthropic_to_openai/__init__.py +55 -0
- ccproxy/llms/formatters/anthropic_to_openai/errors.py +65 -0
- ccproxy/llms/formatters/anthropic_to_openai/requests.py +356 -0
- ccproxy/llms/formatters/anthropic_to_openai/responses.py +153 -0
- ccproxy/llms/formatters/anthropic_to_openai/streams.py +1546 -0
- ccproxy/llms/formatters/base.py +140 -0
- ccproxy/llms/formatters/base_model.py +33 -0
- ccproxy/llms/formatters/common/__init__.py +51 -0
- ccproxy/llms/formatters/common/identifiers.py +48 -0
- ccproxy/llms/formatters/common/streams.py +254 -0
- ccproxy/llms/formatters/common/thinking.py +74 -0
- ccproxy/llms/formatters/common/usage.py +135 -0
- ccproxy/llms/formatters/constants.py +55 -0
- ccproxy/llms/formatters/context.py +116 -0
- ccproxy/llms/formatters/mapping.py +33 -0
- ccproxy/llms/formatters/openai_to_anthropic/__init__.py +55 -0
- ccproxy/llms/formatters/openai_to_anthropic/_helpers.py +141 -0
- ccproxy/llms/formatters/openai_to_anthropic/errors.py +53 -0
- ccproxy/llms/formatters/openai_to_anthropic/requests.py +674 -0
- ccproxy/llms/formatters/openai_to_anthropic/responses.py +285 -0
- ccproxy/llms/formatters/openai_to_anthropic/streams.py +530 -0
- ccproxy/llms/formatters/openai_to_openai/__init__.py +53 -0
- ccproxy/llms/formatters/openai_to_openai/_helpers.py +325 -0
- ccproxy/llms/formatters/openai_to_openai/errors.py +6 -0
- ccproxy/llms/formatters/openai_to_openai/requests.py +388 -0
- ccproxy/llms/formatters/openai_to_openai/responses.py +594 -0
- ccproxy/llms/formatters/openai_to_openai/streams.py +1832 -0
- ccproxy/llms/formatters/utils.py +306 -0
- ccproxy/llms/models/__init__.py +9 -0
- ccproxy/llms/models/anthropic.py +619 -0
- ccproxy/llms/models/openai.py +844 -0
- ccproxy/llms/streaming/__init__.py +26 -0
- ccproxy/llms/streaming/accumulators.py +1074 -0
- ccproxy/llms/streaming/formatters.py +251 -0
- ccproxy/{adapters/openai/streaming.py → llms/streaming/processors.py} +193 -240
- ccproxy/models/__init__.py +8 -159
- ccproxy/models/detection.py +92 -193
- ccproxy/models/provider.py +75 -0
- ccproxy/plugins/access_log/README.md +32 -0
- ccproxy/plugins/access_log/__init__.py +20 -0
- ccproxy/plugins/access_log/config.py +33 -0
- ccproxy/plugins/access_log/formatter.py +126 -0
- ccproxy/plugins/access_log/hook.py +763 -0
- ccproxy/plugins/access_log/logger.py +254 -0
- ccproxy/plugins/access_log/plugin.py +137 -0
- ccproxy/plugins/access_log/writer.py +109 -0
- ccproxy/plugins/analytics/README.md +24 -0
- ccproxy/plugins/analytics/__init__.py +1 -0
- ccproxy/plugins/analytics/config.py +5 -0
- ccproxy/plugins/analytics/ingest.py +85 -0
- ccproxy/plugins/analytics/models.py +97 -0
- ccproxy/plugins/analytics/plugin.py +121 -0
- ccproxy/plugins/analytics/routes.py +163 -0
- ccproxy/plugins/analytics/service.py +284 -0
- ccproxy/plugins/claude_api/README.md +29 -0
- ccproxy/plugins/claude_api/__init__.py +10 -0
- ccproxy/plugins/claude_api/adapter.py +829 -0
- ccproxy/plugins/claude_api/config.py +52 -0
- ccproxy/plugins/claude_api/detection_service.py +461 -0
- ccproxy/plugins/claude_api/health.py +175 -0
- ccproxy/plugins/claude_api/hooks.py +284 -0
- ccproxy/plugins/claude_api/models.py +256 -0
- ccproxy/plugins/claude_api/plugin.py +298 -0
- ccproxy/plugins/claude_api/routes.py +118 -0
- ccproxy/plugins/claude_api/streaming_metrics.py +68 -0
- ccproxy/plugins/claude_api/tasks.py +84 -0
- ccproxy/plugins/claude_sdk/README.md +35 -0
- ccproxy/plugins/claude_sdk/__init__.py +80 -0
- ccproxy/plugins/claude_sdk/adapter.py +749 -0
- ccproxy/plugins/claude_sdk/auth.py +57 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/client.py +63 -39
- ccproxy/plugins/claude_sdk/config.py +210 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/converter.py +6 -6
- ccproxy/plugins/claude_sdk/detection_service.py +163 -0
- ccproxy/{services/claude_sdk_service.py → plugins/claude_sdk/handler.py} +123 -304
- ccproxy/plugins/claude_sdk/health.py +113 -0
- ccproxy/plugins/claude_sdk/hooks.py +115 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/manager.py +42 -32
- ccproxy/{claude_sdk → plugins/claude_sdk}/message_queue.py +8 -8
- ccproxy/{models/claude_sdk.py → plugins/claude_sdk/models.py} +64 -16
- ccproxy/plugins/claude_sdk/options.py +154 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/parser.py +23 -5
- ccproxy/plugins/claude_sdk/plugin.py +269 -0
- ccproxy/plugins/claude_sdk/routes.py +104 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/session_client.py +124 -12
- ccproxy/plugins/claude_sdk/session_pool.py +700 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_handle.py +48 -43
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_worker.py +22 -18
- ccproxy/{claude_sdk → plugins/claude_sdk}/streaming.py +50 -16
- ccproxy/plugins/claude_sdk/tasks.py +97 -0
- ccproxy/plugins/claude_shared/README.md +18 -0
- ccproxy/plugins/claude_shared/__init__.py +12 -0
- ccproxy/plugins/claude_shared/model_defaults.py +171 -0
- ccproxy/plugins/codex/README.md +35 -0
- ccproxy/plugins/codex/__init__.py +6 -0
- ccproxy/plugins/codex/adapter.py +635 -0
- ccproxy/{config/codex.py → plugins/codex/config.py} +78 -12
- ccproxy/plugins/codex/detection_service.py +544 -0
- ccproxy/plugins/codex/health.py +162 -0
- ccproxy/plugins/codex/hooks.py +263 -0
- ccproxy/plugins/codex/model_defaults.py +39 -0
- ccproxy/plugins/codex/models.py +263 -0
- ccproxy/plugins/codex/plugin.py +275 -0
- ccproxy/plugins/codex/routes.py +129 -0
- ccproxy/plugins/codex/streaming_metrics.py +324 -0
- ccproxy/plugins/codex/tasks.py +106 -0
- ccproxy/plugins/codex/utils/__init__.py +1 -0
- ccproxy/plugins/codex/utils/sse_parser.py +106 -0
- ccproxy/plugins/command_replay/README.md +34 -0
- ccproxy/plugins/command_replay/__init__.py +17 -0
- ccproxy/plugins/command_replay/config.py +133 -0
- ccproxy/plugins/command_replay/formatter.py +432 -0
- ccproxy/plugins/command_replay/hook.py +294 -0
- ccproxy/plugins/command_replay/plugin.py +161 -0
- ccproxy/plugins/copilot/README.md +39 -0
- ccproxy/plugins/copilot/__init__.py +11 -0
- ccproxy/plugins/copilot/adapter.py +465 -0
- ccproxy/plugins/copilot/config.py +155 -0
- ccproxy/plugins/copilot/data/copilot_fallback.json +41 -0
- ccproxy/plugins/copilot/detection_service.py +255 -0
- ccproxy/plugins/copilot/manager.py +275 -0
- ccproxy/plugins/copilot/model_defaults.py +284 -0
- ccproxy/plugins/copilot/models.py +148 -0
- ccproxy/plugins/copilot/oauth/__init__.py +16 -0
- ccproxy/plugins/copilot/oauth/client.py +494 -0
- ccproxy/plugins/copilot/oauth/models.py +385 -0
- ccproxy/plugins/copilot/oauth/provider.py +602 -0
- ccproxy/plugins/copilot/oauth/storage.py +170 -0
- ccproxy/plugins/copilot/plugin.py +360 -0
- ccproxy/plugins/copilot/routes.py +294 -0
- ccproxy/plugins/credential_balancer/README.md +124 -0
- ccproxy/plugins/credential_balancer/__init__.py +6 -0
- ccproxy/plugins/credential_balancer/config.py +270 -0
- ccproxy/plugins/credential_balancer/factory.py +415 -0
- ccproxy/plugins/credential_balancer/hook.py +51 -0
- ccproxy/plugins/credential_balancer/manager.py +587 -0
- ccproxy/plugins/credential_balancer/plugin.py +146 -0
- ccproxy/plugins/dashboard/README.md +25 -0
- ccproxy/plugins/dashboard/__init__.py +1 -0
- ccproxy/plugins/dashboard/config.py +8 -0
- ccproxy/plugins/dashboard/plugin.py +71 -0
- ccproxy/plugins/dashboard/routes.py +67 -0
- ccproxy/plugins/docker/README.md +32 -0
- ccproxy/{docker → plugins/docker}/__init__.py +3 -0
- ccproxy/{docker → plugins/docker}/adapter.py +108 -10
- ccproxy/plugins/docker/config.py +82 -0
- ccproxy/{docker → plugins/docker}/docker_path.py +4 -3
- ccproxy/{docker → plugins/docker}/middleware.py +2 -2
- ccproxy/plugins/docker/plugin.py +198 -0
- ccproxy/{docker → plugins/docker}/stream_process.py +3 -3
- ccproxy/plugins/duckdb_storage/README.md +26 -0
- ccproxy/plugins/duckdb_storage/__init__.py +1 -0
- ccproxy/plugins/duckdb_storage/config.py +22 -0
- ccproxy/plugins/duckdb_storage/plugin.py +128 -0
- ccproxy/plugins/duckdb_storage/routes.py +51 -0
- ccproxy/plugins/duckdb_storage/storage.py +633 -0
- ccproxy/plugins/max_tokens/README.md +38 -0
- ccproxy/plugins/max_tokens/__init__.py +12 -0
- ccproxy/plugins/max_tokens/adapter.py +235 -0
- ccproxy/plugins/max_tokens/config.py +86 -0
- ccproxy/plugins/max_tokens/models.py +53 -0
- ccproxy/plugins/max_tokens/plugin.py +200 -0
- ccproxy/plugins/max_tokens/service.py +271 -0
- ccproxy/plugins/max_tokens/token_limits.json +54 -0
- ccproxy/plugins/metrics/README.md +35 -0
- ccproxy/plugins/metrics/__init__.py +10 -0
- ccproxy/{observability/metrics.py → plugins/metrics/collector.py} +20 -153
- ccproxy/plugins/metrics/config.py +85 -0
- ccproxy/plugins/metrics/grafana/dashboards/ccproxy-dashboard.json +1720 -0
- ccproxy/plugins/metrics/hook.py +403 -0
- ccproxy/plugins/metrics/plugin.py +268 -0
- ccproxy/{observability → plugins/metrics}/pushgateway.py +57 -59
- ccproxy/plugins/metrics/routes.py +107 -0
- ccproxy/plugins/metrics/tasks.py +117 -0
- ccproxy/plugins/oauth_claude/README.md +35 -0
- ccproxy/plugins/oauth_claude/__init__.py +14 -0
- ccproxy/plugins/oauth_claude/client.py +270 -0
- ccproxy/plugins/oauth_claude/config.py +84 -0
- ccproxy/plugins/oauth_claude/manager.py +482 -0
- ccproxy/plugins/oauth_claude/models.py +266 -0
- ccproxy/plugins/oauth_claude/plugin.py +149 -0
- ccproxy/plugins/oauth_claude/provider.py +571 -0
- ccproxy/plugins/oauth_claude/storage.py +212 -0
- ccproxy/plugins/oauth_codex/README.md +38 -0
- ccproxy/plugins/oauth_codex/__init__.py +14 -0
- ccproxy/plugins/oauth_codex/client.py +224 -0
- ccproxy/plugins/oauth_codex/config.py +95 -0
- ccproxy/plugins/oauth_codex/manager.py +256 -0
- ccproxy/plugins/oauth_codex/models.py +239 -0
- ccproxy/plugins/oauth_codex/plugin.py +146 -0
- ccproxy/plugins/oauth_codex/provider.py +574 -0
- ccproxy/plugins/oauth_codex/storage.py +92 -0
- ccproxy/plugins/permissions/README.md +28 -0
- ccproxy/plugins/permissions/__init__.py +22 -0
- ccproxy/plugins/permissions/config.py +28 -0
- ccproxy/{cli/commands/permission_handler.py → plugins/permissions/handlers/cli.py} +49 -25
- ccproxy/plugins/permissions/handlers/protocol.py +33 -0
- ccproxy/plugins/permissions/handlers/terminal.py +675 -0
- ccproxy/{api/routes → plugins/permissions}/mcp.py +34 -7
- ccproxy/{models/permissions.py → plugins/permissions/models.py} +65 -1
- ccproxy/plugins/permissions/plugin.py +153 -0
- ccproxy/{api/routes/permissions.py → plugins/permissions/routes.py} +20 -16
- ccproxy/{api/services/permission_service.py → plugins/permissions/service.py} +65 -11
- ccproxy/{api → plugins/permissions}/ui/permission_handler_protocol.py +1 -1
- ccproxy/{api → plugins/permissions}/ui/terminal_permission_handler.py +66 -10
- ccproxy/plugins/pricing/README.md +34 -0
- ccproxy/plugins/pricing/__init__.py +6 -0
- ccproxy/{pricing → plugins/pricing}/cache.py +7 -6
- ccproxy/{config/pricing.py → plugins/pricing/config.py} +32 -6
- ccproxy/plugins/pricing/exceptions.py +35 -0
- ccproxy/plugins/pricing/loader.py +440 -0
- ccproxy/{pricing → plugins/pricing}/models.py +13 -23
- ccproxy/plugins/pricing/plugin.py +169 -0
- ccproxy/plugins/pricing/service.py +191 -0
- ccproxy/plugins/pricing/tasks.py +300 -0
- ccproxy/{pricing → plugins/pricing}/updater.py +86 -72
- ccproxy/plugins/pricing/utils.py +99 -0
- ccproxy/plugins/request_tracer/README.md +40 -0
- ccproxy/plugins/request_tracer/__init__.py +7 -0
- ccproxy/plugins/request_tracer/config.py +120 -0
- ccproxy/plugins/request_tracer/hook.py +415 -0
- ccproxy/plugins/request_tracer/plugin.py +255 -0
- ccproxy/scheduler/__init__.py +2 -14
- ccproxy/scheduler/core.py +26 -41
- ccproxy/scheduler/manager.py +63 -107
- ccproxy/scheduler/registry.py +6 -32
- ccproxy/scheduler/tasks.py +346 -314
- ccproxy/services/__init__.py +0 -1
- ccproxy/services/adapters/__init__.py +11 -0
- ccproxy/services/adapters/base.py +123 -0
- ccproxy/services/adapters/chain_composer.py +88 -0
- ccproxy/services/adapters/chain_validation.py +44 -0
- ccproxy/services/adapters/chat_accumulator.py +200 -0
- ccproxy/services/adapters/delta_utils.py +142 -0
- ccproxy/services/adapters/format_adapter.py +136 -0
- ccproxy/services/adapters/format_context.py +11 -0
- ccproxy/services/adapters/format_registry.py +158 -0
- ccproxy/services/adapters/http_adapter.py +1045 -0
- ccproxy/services/adapters/mock_adapter.py +118 -0
- ccproxy/services/adapters/protocols.py +35 -0
- ccproxy/services/adapters/simple_converters.py +571 -0
- ccproxy/services/auth_registry.py +180 -0
- ccproxy/services/cache/__init__.py +6 -0
- ccproxy/services/cache/response_cache.py +261 -0
- ccproxy/services/cli_detection.py +437 -0
- ccproxy/services/config/__init__.py +6 -0
- ccproxy/services/config/proxy_configuration.py +111 -0
- ccproxy/services/container.py +256 -0
- ccproxy/services/factories.py +380 -0
- ccproxy/services/handler_config.py +76 -0
- ccproxy/services/interfaces.py +298 -0
- ccproxy/services/mocking/__init__.py +6 -0
- ccproxy/services/mocking/mock_handler.py +291 -0
- ccproxy/services/tracing/__init__.py +7 -0
- ccproxy/services/tracing/interfaces.py +61 -0
- ccproxy/services/tracing/null_tracer.py +57 -0
- ccproxy/streaming/__init__.py +23 -0
- ccproxy/streaming/buffer.py +1056 -0
- ccproxy/streaming/deferred.py +897 -0
- ccproxy/streaming/handler.py +117 -0
- ccproxy/streaming/interfaces.py +77 -0
- ccproxy/streaming/simple_adapter.py +39 -0
- ccproxy/streaming/sse.py +109 -0
- ccproxy/streaming/sse_parser.py +127 -0
- ccproxy/templates/__init__.py +6 -0
- ccproxy/templates/plugin_scaffold.py +695 -0
- ccproxy/testing/endpoints/__init__.py +33 -0
- ccproxy/testing/endpoints/cli.py +215 -0
- ccproxy/testing/endpoints/config.py +874 -0
- ccproxy/testing/endpoints/console.py +57 -0
- ccproxy/testing/endpoints/models.py +100 -0
- ccproxy/testing/endpoints/runner.py +1903 -0
- ccproxy/testing/endpoints/tools.py +308 -0
- ccproxy/testing/mock_responses.py +70 -1
- ccproxy/testing/response_handlers.py +20 -0
- ccproxy/utils/__init__.py +0 -6
- ccproxy/utils/binary_resolver.py +476 -0
- ccproxy/utils/caching.py +327 -0
- ccproxy/utils/cli_logging.py +101 -0
- ccproxy/utils/command_line.py +251 -0
- ccproxy/utils/headers.py +228 -0
- ccproxy/utils/model_mapper.py +120 -0
- ccproxy/utils/startup_helpers.py +95 -342
- ccproxy/utils/version_checker.py +279 -6
- ccproxy_api-0.2.0.dist-info/METADATA +212 -0
- ccproxy_api-0.2.0.dist-info/RECORD +417 -0
- {ccproxy_api-0.1.6.dist-info → ccproxy_api-0.2.0.dist-info}/WHEEL +1 -1
- ccproxy_api-0.2.0.dist-info/entry_points.txt +24 -0
- ccproxy/__init__.py +0 -4
- ccproxy/adapters/__init__.py +0 -11
- ccproxy/adapters/base.py +0 -80
- ccproxy/adapters/codex/__init__.py +0 -11
- ccproxy/adapters/openai/__init__.py +0 -42
- ccproxy/adapters/openai/adapter.py +0 -953
- ccproxy/adapters/openai/models.py +0 -412
- ccproxy/adapters/openai/response_adapter.py +0 -355
- ccproxy/adapters/openai/response_models.py +0 -178
- ccproxy/api/middleware/headers.py +0 -49
- ccproxy/api/middleware/logging.py +0 -180
- ccproxy/api/middleware/request_content_logging.py +0 -297
- ccproxy/api/middleware/server_header.py +0 -58
- ccproxy/api/responses.py +0 -89
- ccproxy/api/routes/claude.py +0 -371
- ccproxy/api/routes/codex.py +0 -1231
- ccproxy/api/routes/metrics.py +0 -1029
- ccproxy/api/routes/proxy.py +0 -211
- ccproxy/api/services/__init__.py +0 -6
- ccproxy/auth/conditional.py +0 -84
- ccproxy/auth/credentials_adapter.py +0 -93
- ccproxy/auth/models.py +0 -118
- ccproxy/auth/oauth/models.py +0 -48
- ccproxy/auth/openai/__init__.py +0 -13
- ccproxy/auth/openai/credentials.py +0 -166
- ccproxy/auth/openai/oauth_client.py +0 -334
- ccproxy/auth/openai/storage.py +0 -184
- ccproxy/auth/storage/json_file.py +0 -158
- ccproxy/auth/storage/keyring.py +0 -189
- ccproxy/claude_sdk/__init__.py +0 -18
- ccproxy/claude_sdk/options.py +0 -194
- ccproxy/claude_sdk/session_pool.py +0 -550
- ccproxy/cli/docker/__init__.py +0 -34
- ccproxy/cli/docker/adapter_factory.py +0 -157
- ccproxy/cli/docker/params.py +0 -274
- ccproxy/config/auth.py +0 -153
- ccproxy/config/claude.py +0 -348
- ccproxy/config/cors.py +0 -79
- ccproxy/config/discovery.py +0 -95
- ccproxy/config/docker_settings.py +0 -264
- ccproxy/config/observability.py +0 -158
- ccproxy/config/reverse_proxy.py +0 -31
- ccproxy/config/scheduler.py +0 -108
- ccproxy/config/server.py +0 -86
- ccproxy/config/validators.py +0 -231
- ccproxy/core/codex_transformers.py +0 -389
- ccproxy/core/http.py +0 -328
- ccproxy/core/http_transformers.py +0 -812
- ccproxy/core/proxy.py +0 -143
- ccproxy/core/validators.py +0 -288
- ccproxy/models/errors.py +0 -42
- ccproxy/models/messages.py +0 -269
- ccproxy/models/requests.py +0 -107
- ccproxy/models/responses.py +0 -270
- ccproxy/models/types.py +0 -102
- ccproxy/observability/__init__.py +0 -51
- ccproxy/observability/access_logger.py +0 -457
- ccproxy/observability/sse_events.py +0 -303
- ccproxy/observability/stats_printer.py +0 -753
- ccproxy/observability/storage/__init__.py +0 -1
- ccproxy/observability/storage/duckdb_simple.py +0 -677
- ccproxy/observability/storage/models.py +0 -70
- ccproxy/observability/streaming_response.py +0 -107
- ccproxy/pricing/__init__.py +0 -19
- ccproxy/pricing/loader.py +0 -251
- ccproxy/services/claude_detection_service.py +0 -269
- ccproxy/services/codex_detection_service.py +0 -263
- ccproxy/services/credentials/__init__.py +0 -55
- ccproxy/services/credentials/config.py +0 -105
- ccproxy/services/credentials/manager.py +0 -561
- ccproxy/services/credentials/oauth_client.py +0 -481
- ccproxy/services/proxy_service.py +0 -1827
- ccproxy/static/.keep +0 -0
- ccproxy/utils/cost_calculator.py +0 -210
- ccproxy/utils/disconnection_monitor.py +0 -83
- ccproxy/utils/model_mapping.py +0 -199
- ccproxy/utils/models_provider.py +0 -150
- ccproxy/utils/simple_request_logger.py +0 -284
- ccproxy/utils/streaming_metrics.py +0 -199
- ccproxy_api-0.1.6.dist-info/METADATA +0 -615
- ccproxy_api-0.1.6.dist-info/RECORD +0 -189
- ccproxy_api-0.1.6.dist-info/entry_points.txt +0 -4
- /ccproxy/{api/middleware/auth.py → auth/models/__init__.py} +0 -0
- /ccproxy/{claude_sdk → plugins/claude_sdk}/exceptions.py +0 -0
- /ccproxy/{docker → plugins/docker}/models.py +0 -0
- /ccproxy/{docker → plugins/docker}/protocol.py +0 -0
- /ccproxy/{docker → plugins/docker}/validators.py +0 -0
- /ccproxy/{auth/oauth/storage.py → plugins/permissions/handlers/__init__.py} +0 -0
- /ccproxy/{api → plugins/permissions}/ui/__init__.py +0 -0
- {ccproxy_api-0.1.6.dist-info → ccproxy_api-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,953 +0,0 @@
|
|
|
1
|
-
"""OpenAI API adapter implementation.
|
|
2
|
-
|
|
3
|
-
This module provides the OpenAI adapter that implements the APIAdapter interface
|
|
4
|
-
for converting between OpenAI and Anthropic API formats.
|
|
5
|
-
"""
|
|
6
|
-
|
|
7
|
-
from __future__ import annotations
|
|
8
|
-
|
|
9
|
-
import json
|
|
10
|
-
import re
|
|
11
|
-
import time
|
|
12
|
-
from collections.abc import AsyncIterator
|
|
13
|
-
from typing import Any, Literal, cast
|
|
14
|
-
|
|
15
|
-
import structlog
|
|
16
|
-
from pydantic import ValidationError
|
|
17
|
-
|
|
18
|
-
from ccproxy.core.interfaces import APIAdapter
|
|
19
|
-
from ccproxy.utils.model_mapping import map_model_to_claude
|
|
20
|
-
|
|
21
|
-
from .models import (
|
|
22
|
-
OpenAIChatCompletionRequest,
|
|
23
|
-
OpenAIChatCompletionResponse,
|
|
24
|
-
OpenAIChoice,
|
|
25
|
-
OpenAIResponseMessage,
|
|
26
|
-
OpenAIUsage,
|
|
27
|
-
format_openai_tool_call,
|
|
28
|
-
generate_openai_response_id,
|
|
29
|
-
generate_openai_system_fingerprint,
|
|
30
|
-
)
|
|
31
|
-
from .streaming import OpenAIStreamProcessor
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
logger = structlog.get_logger(__name__)
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
class OpenAIAdapter(APIAdapter):
|
|
38
|
-
"""OpenAI API adapter for converting between OpenAI and Anthropic formats."""
|
|
39
|
-
|
|
40
|
-
def __init__(self, include_sdk_content_as_xml: bool = False) -> None:
|
|
41
|
-
"""Initialize the OpenAI adapter."""
|
|
42
|
-
self.include_sdk_content_as_xml = include_sdk_content_as_xml
|
|
43
|
-
|
|
44
|
-
def adapt_request(self, request: dict[str, Any]) -> dict[str, Any]:
|
|
45
|
-
"""Convert OpenAI request format to Anthropic format.
|
|
46
|
-
|
|
47
|
-
Args:
|
|
48
|
-
request: OpenAI format request
|
|
49
|
-
|
|
50
|
-
Returns:
|
|
51
|
-
Anthropic format request
|
|
52
|
-
|
|
53
|
-
Raises:
|
|
54
|
-
ValueError: If the request format is invalid or unsupported
|
|
55
|
-
"""
|
|
56
|
-
try:
|
|
57
|
-
# Parse OpenAI request
|
|
58
|
-
openai_req = OpenAIChatCompletionRequest(**request)
|
|
59
|
-
except ValidationError as e:
|
|
60
|
-
raise ValueError(f"Invalid OpenAI request format: {e}") from e
|
|
61
|
-
|
|
62
|
-
# Map OpenAI model to Claude model
|
|
63
|
-
model = map_model_to_claude(openai_req.model)
|
|
64
|
-
|
|
65
|
-
# Convert messages
|
|
66
|
-
messages, system_prompt = self._convert_messages_to_anthropic(
|
|
67
|
-
openai_req.messages
|
|
68
|
-
)
|
|
69
|
-
|
|
70
|
-
# Build base Anthropic request
|
|
71
|
-
anthropic_request = {
|
|
72
|
-
"model": model,
|
|
73
|
-
"messages": messages,
|
|
74
|
-
"max_tokens": openai_req.max_tokens or 4096,
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
# Add system prompt if present
|
|
78
|
-
if system_prompt:
|
|
79
|
-
anthropic_request["system"] = system_prompt
|
|
80
|
-
|
|
81
|
-
# Add optional parameters
|
|
82
|
-
self._handle_optional_parameters(openai_req, anthropic_request)
|
|
83
|
-
|
|
84
|
-
# Handle metadata
|
|
85
|
-
self._handle_metadata(openai_req, anthropic_request)
|
|
86
|
-
|
|
87
|
-
# Handle response format
|
|
88
|
-
anthropic_request = self._handle_response_format(openai_req, anthropic_request)
|
|
89
|
-
|
|
90
|
-
# Handle thinking configuration
|
|
91
|
-
anthropic_request = self._handle_thinking_parameters(
|
|
92
|
-
openai_req, anthropic_request
|
|
93
|
-
)
|
|
94
|
-
|
|
95
|
-
# Log unsupported parameters
|
|
96
|
-
self._log_unsupported_parameters(openai_req)
|
|
97
|
-
|
|
98
|
-
# Handle tools and tool choice
|
|
99
|
-
self._handle_tools(openai_req, anthropic_request)
|
|
100
|
-
|
|
101
|
-
logger.debug(
|
|
102
|
-
"format_conversion_completed",
|
|
103
|
-
from_format="openai",
|
|
104
|
-
to_format="anthropic",
|
|
105
|
-
original_model=openai_req.model,
|
|
106
|
-
anthropic_model=anthropic_request.get("model"),
|
|
107
|
-
has_tools=bool(anthropic_request.get("tools")),
|
|
108
|
-
has_system=bool(anthropic_request.get("system")),
|
|
109
|
-
message_count=len(cast(list[Any], anthropic_request["messages"])),
|
|
110
|
-
operation="adapt_request",
|
|
111
|
-
)
|
|
112
|
-
return anthropic_request
|
|
113
|
-
|
|
114
|
-
def _handle_optional_parameters(
|
|
115
|
-
self,
|
|
116
|
-
openai_req: OpenAIChatCompletionRequest,
|
|
117
|
-
anthropic_request: dict[str, Any],
|
|
118
|
-
) -> None:
|
|
119
|
-
"""Handle optional parameters like temperature, top_p, stream, and stop."""
|
|
120
|
-
if openai_req.temperature is not None:
|
|
121
|
-
anthropic_request["temperature"] = openai_req.temperature
|
|
122
|
-
|
|
123
|
-
if openai_req.top_p is not None:
|
|
124
|
-
anthropic_request["top_p"] = openai_req.top_p
|
|
125
|
-
|
|
126
|
-
if openai_req.stream is not None:
|
|
127
|
-
anthropic_request["stream"] = openai_req.stream
|
|
128
|
-
|
|
129
|
-
if openai_req.stop is not None:
|
|
130
|
-
if isinstance(openai_req.stop, str):
|
|
131
|
-
anthropic_request["stop_sequences"] = [openai_req.stop]
|
|
132
|
-
else:
|
|
133
|
-
anthropic_request["stop_sequences"] = openai_req.stop
|
|
134
|
-
|
|
135
|
-
def _handle_metadata(
|
|
136
|
-
self,
|
|
137
|
-
openai_req: OpenAIChatCompletionRequest,
|
|
138
|
-
anthropic_request: dict[str, Any],
|
|
139
|
-
) -> None:
|
|
140
|
-
"""Handle metadata and user field combination."""
|
|
141
|
-
metadata = {}
|
|
142
|
-
if openai_req.user:
|
|
143
|
-
metadata["user_id"] = openai_req.user
|
|
144
|
-
if openai_req.metadata:
|
|
145
|
-
metadata.update(openai_req.metadata)
|
|
146
|
-
if metadata:
|
|
147
|
-
anthropic_request["metadata"] = metadata
|
|
148
|
-
|
|
149
|
-
def _handle_response_format(
|
|
150
|
-
self,
|
|
151
|
-
openai_req: OpenAIChatCompletionRequest,
|
|
152
|
-
anthropic_request: dict[str, Any],
|
|
153
|
-
) -> dict[str, Any]:
|
|
154
|
-
"""Handle response format by modifying system prompt for JSON mode."""
|
|
155
|
-
if openai_req.response_format:
|
|
156
|
-
format_type = (
|
|
157
|
-
openai_req.response_format.type if openai_req.response_format else None
|
|
158
|
-
)
|
|
159
|
-
system_prompt = anthropic_request.get("system")
|
|
160
|
-
|
|
161
|
-
if format_type == "json_object" and system_prompt is not None:
|
|
162
|
-
system_prompt += "\nYou must respond with valid JSON only."
|
|
163
|
-
anthropic_request["system"] = system_prompt
|
|
164
|
-
elif format_type == "json_schema" and system_prompt is not None:
|
|
165
|
-
# For JSON schema, we can add more specific instructions
|
|
166
|
-
if openai_req.response_format and hasattr(
|
|
167
|
-
openai_req.response_format, "json_schema"
|
|
168
|
-
):
|
|
169
|
-
system_prompt += f"\nYou must respond with valid JSON that conforms to this schema: {openai_req.response_format.json_schema}"
|
|
170
|
-
anthropic_request["system"] = system_prompt
|
|
171
|
-
|
|
172
|
-
return anthropic_request
|
|
173
|
-
|
|
174
|
-
def _handle_thinking_parameters(
|
|
175
|
-
self,
|
|
176
|
-
openai_req: OpenAIChatCompletionRequest,
|
|
177
|
-
anthropic_request: dict[str, Any],
|
|
178
|
-
) -> dict[str, Any]:
|
|
179
|
-
"""Handle reasoning_effort and thinking configuration for o1/o3 models."""
|
|
180
|
-
# Automatically enable thinking for o1 models even without explicit reasoning_effort
|
|
181
|
-
if (
|
|
182
|
-
openai_req.reasoning_effort
|
|
183
|
-
or openai_req.model.startswith("o1")
|
|
184
|
-
or openai_req.model.startswith("o3")
|
|
185
|
-
):
|
|
186
|
-
# Map reasoning effort to thinking tokens
|
|
187
|
-
thinking_tokens_map = {
|
|
188
|
-
"low": 1000,
|
|
189
|
-
"medium": 5000,
|
|
190
|
-
"high": 10000,
|
|
191
|
-
}
|
|
192
|
-
|
|
193
|
-
# Default thinking tokens based on model if reasoning_effort not specified
|
|
194
|
-
default_thinking_tokens = 5000 # medium by default
|
|
195
|
-
if openai_req.model.startswith("o3"):
|
|
196
|
-
default_thinking_tokens = 10000 # high for o3 models
|
|
197
|
-
elif openai_req.model == "o1-mini":
|
|
198
|
-
default_thinking_tokens = 3000 # lower for mini model
|
|
199
|
-
|
|
200
|
-
thinking_tokens = (
|
|
201
|
-
thinking_tokens_map.get(
|
|
202
|
-
openai_req.reasoning_effort, default_thinking_tokens
|
|
203
|
-
)
|
|
204
|
-
if openai_req.reasoning_effort
|
|
205
|
-
else default_thinking_tokens
|
|
206
|
-
)
|
|
207
|
-
|
|
208
|
-
anthropic_request["thinking"] = {
|
|
209
|
-
"type": "enabled",
|
|
210
|
-
"budget_tokens": thinking_tokens,
|
|
211
|
-
}
|
|
212
|
-
|
|
213
|
-
# Ensure max_tokens is greater than budget_tokens
|
|
214
|
-
current_max_tokens = cast(int, anthropic_request.get("max_tokens", 4096))
|
|
215
|
-
if current_max_tokens <= thinking_tokens:
|
|
216
|
-
# Set max_tokens to be 2x thinking tokens + some buffer for response
|
|
217
|
-
anthropic_request["max_tokens"] = thinking_tokens + max(
|
|
218
|
-
thinking_tokens, 4096
|
|
219
|
-
)
|
|
220
|
-
logger.debug(
|
|
221
|
-
"max_tokens_adjusted_for_thinking",
|
|
222
|
-
original_max_tokens=current_max_tokens,
|
|
223
|
-
thinking_tokens=thinking_tokens,
|
|
224
|
-
new_max_tokens=anthropic_request["max_tokens"],
|
|
225
|
-
operation="adapt_request",
|
|
226
|
-
)
|
|
227
|
-
|
|
228
|
-
# When thinking is enabled, temperature must be 1.0
|
|
229
|
-
if (
|
|
230
|
-
anthropic_request.get("temperature") is not None
|
|
231
|
-
and anthropic_request["temperature"] != 1.0
|
|
232
|
-
):
|
|
233
|
-
logger.debug(
|
|
234
|
-
"temperature_adjusted_for_thinking",
|
|
235
|
-
original_temperature=anthropic_request["temperature"],
|
|
236
|
-
new_temperature=1.0,
|
|
237
|
-
operation="adapt_request",
|
|
238
|
-
)
|
|
239
|
-
anthropic_request["temperature"] = 1.0
|
|
240
|
-
elif "temperature" not in anthropic_request:
|
|
241
|
-
# Set default temperature to 1.0 for thinking mode
|
|
242
|
-
anthropic_request["temperature"] = 1.0
|
|
243
|
-
|
|
244
|
-
logger.debug(
|
|
245
|
-
"thinking_enabled",
|
|
246
|
-
reasoning_effort=openai_req.reasoning_effort,
|
|
247
|
-
model=openai_req.model,
|
|
248
|
-
thinking_tokens=thinking_tokens,
|
|
249
|
-
temperature=anthropic_request["temperature"],
|
|
250
|
-
operation="adapt_request",
|
|
251
|
-
)
|
|
252
|
-
|
|
253
|
-
return anthropic_request
|
|
254
|
-
|
|
255
|
-
def _log_unsupported_parameters(
|
|
256
|
-
self, openai_req: OpenAIChatCompletionRequest
|
|
257
|
-
) -> None:
|
|
258
|
-
"""Log warnings for unsupported OpenAI parameters."""
|
|
259
|
-
if openai_req.seed is not None:
|
|
260
|
-
logger.debug(
|
|
261
|
-
"unsupported_parameter_ignored",
|
|
262
|
-
parameter="seed",
|
|
263
|
-
value=openai_req.seed,
|
|
264
|
-
operation="adapt_request",
|
|
265
|
-
)
|
|
266
|
-
if openai_req.logprobs or openai_req.top_logprobs:
|
|
267
|
-
logger.debug(
|
|
268
|
-
"unsupported_parameters_ignored",
|
|
269
|
-
parameters=["logprobs", "top_logprobs"],
|
|
270
|
-
logprobs=openai_req.logprobs,
|
|
271
|
-
top_logprobs=openai_req.top_logprobs,
|
|
272
|
-
operation="adapt_request",
|
|
273
|
-
)
|
|
274
|
-
if openai_req.store:
|
|
275
|
-
logger.debug(
|
|
276
|
-
"unsupported_parameter_ignored",
|
|
277
|
-
parameter="store",
|
|
278
|
-
value=openai_req.store,
|
|
279
|
-
operation="adapt_request",
|
|
280
|
-
)
|
|
281
|
-
|
|
282
|
-
def _handle_tools(
|
|
283
|
-
self,
|
|
284
|
-
openai_req: OpenAIChatCompletionRequest,
|
|
285
|
-
anthropic_request: dict[str, Any],
|
|
286
|
-
) -> None:
|
|
287
|
-
"""Handle tools, functions, and tool choice conversion."""
|
|
288
|
-
# Handle tools/functions
|
|
289
|
-
if openai_req.tools:
|
|
290
|
-
anthropic_request["tools"] = self._convert_tools_to_anthropic(
|
|
291
|
-
openai_req.tools
|
|
292
|
-
)
|
|
293
|
-
elif openai_req.functions:
|
|
294
|
-
# Convert deprecated functions to tools
|
|
295
|
-
anthropic_request["tools"] = self._convert_functions_to_anthropic(
|
|
296
|
-
openai_req.functions
|
|
297
|
-
)
|
|
298
|
-
|
|
299
|
-
# Handle tool choice
|
|
300
|
-
if openai_req.tool_choice:
|
|
301
|
-
# Convert tool choice - can be string or OpenAIToolChoice object
|
|
302
|
-
if isinstance(openai_req.tool_choice, str):
|
|
303
|
-
anthropic_request["tool_choice"] = (
|
|
304
|
-
self._convert_tool_choice_to_anthropic(openai_req.tool_choice)
|
|
305
|
-
)
|
|
306
|
-
else:
|
|
307
|
-
# Convert OpenAIToolChoice object to dict
|
|
308
|
-
tool_choice_dict = {
|
|
309
|
-
"type": openai_req.tool_choice.type,
|
|
310
|
-
"function": openai_req.tool_choice.function,
|
|
311
|
-
}
|
|
312
|
-
anthropic_request["tool_choice"] = (
|
|
313
|
-
self._convert_tool_choice_to_anthropic(tool_choice_dict)
|
|
314
|
-
)
|
|
315
|
-
elif openai_req.function_call:
|
|
316
|
-
# Convert deprecated function_call to tool_choice
|
|
317
|
-
anthropic_request["tool_choice"] = self._convert_function_call_to_anthropic(
|
|
318
|
-
openai_req.function_call
|
|
319
|
-
)
|
|
320
|
-
|
|
321
|
-
def adapt_response(self, response: dict[str, Any]) -> dict[str, Any]:
    """Convert Anthropic response format to OpenAI format.

    Args:
        response: Anthropic format response

    Returns:
        OpenAI format response (plain dict from ``model_dump()``)

    Raises:
        ValueError: If the response format is invalid or unsupported
    """
    try:
        # Extract original model from response metadata if available.
        # NOTE(review): falls back to the literal "gpt-4" when the
        # upstream response carries no model name.
        original_model = response.get("model", "gpt-4")

        # Generate response ID
        request_id = generate_openai_response_id()

        # Convert content and extract tool calls
        content, tool_calls = self._convert_content_blocks(response)

        # Create OpenAI message
        message = self._create_openai_message(content, tool_calls)

        # Create choice with proper finish reason
        choice = self._create_openai_choice(message, response)

        # Create usage information
        usage = self._create_openai_usage(response)

        # Create final OpenAI response
        openai_response = OpenAIChatCompletionResponse(
            id=request_id,
            object="chat.completion",
            created=int(time.time()),
            model=original_model,
            choices=[choice],
            usage=usage,
            system_fingerprint=generate_openai_system_fingerprint(),
        )

        logger.debug(
            "format_conversion_completed",
            from_format="anthropic",
            to_format="openai",
            response_id=request_id,
            original_model=original_model,
            finish_reason=choice.finish_reason,
            content_length=len(content) if content else 0,
            tool_calls_count=len(tool_calls),
            input_tokens=usage.prompt_tokens,
            output_tokens=usage.completion_tokens,
            operation="adapt_response",
            choice=choice,
        )
        return openai_response.model_dump()

    # Only Pydantic validation failures are wrapped; any other exception
    # from the helpers propagates unchanged.
    except ValidationError as e:
        raise ValueError(f"Invalid Anthropic response format: {e}") from e
|
|
381
|
-
|
|
382
|
-
def _convert_content_blocks(
    self, response: dict[str, Any]
) -> tuple[str, list[Any]]:
    """Convert Anthropic content blocks to OpenAI format content and tool calls.

    Text-like blocks are concatenated into a single string; ``tool_use``
    (and the SDK variant ``tool_use_sdk``) blocks become OpenAI tool
    calls. Unknown block types are logged and dropped.
    """
    content = ""
    tool_calls: list[Any] = []

    if "content" in response and response["content"]:
        for block in response["content"]:
            if block.get("type") == "text":
                text_content = block.get("text", "")
                # Forward text content as-is (already formatted if needed)
                content += text_content
            elif block.get("type") == "system_message":
                # Handle custom system_message content blocks
                system_text = block.get("text", "")
                source = block.get("source", "claude_code_sdk")
                # Format as text with clear source attribution
                content += f"[{source}]: {system_text}"
            elif block.get("type") == "tool_use_sdk":
                # Handle custom tool_use_sdk content blocks - convert to standard tool_calls
                tool_call_block = {
                    "type": "tool_use",
                    "id": block.get("id", ""),
                    "name": block.get("name", ""),
                    "input": block.get("input", {}),
                }
                tool_calls.append(format_openai_tool_call(tool_call_block))
            elif block.get("type") == "tool_result_sdk":
                # Handle custom tool_result_sdk content blocks - add as text with source attribution
                source = block.get("source", "claude_code_sdk")
                tool_use_id = block.get("tool_use_id", "")
                result_content = block.get("content", "")
                is_error = block.get("is_error", False)
                error_indicator = " (ERROR)" if is_error else ""
                content += f"[{source} tool_result {tool_use_id}{error_indicator}]: {result_content}"
            elif block.get("type") == "result_message":
                # Handle custom result_message content blocks - add as text with source attribution
                source = block.get("source", "claude_code_sdk")
                result_data = block.get("data", {})
                session_id = result_data.get("session_id", "")
                stop_reason = result_data.get("stop_reason", "")
                usage = result_data.get("usage", {})
                cost_usd = result_data.get("total_cost_usd")
                formatted_text = f"[{source} result {session_id}]: stop_reason={stop_reason}, usage={usage}"
                if cost_usd is not None:
                    formatted_text += f", cost_usd={cost_usd}"
                content += formatted_text
            elif block.get("type") == "thinking":
                # Handle thinking blocks - we can include them with a marker.
                # NOTE: a missing signature renders as the literal string
                # "None" here; _convert_content_to_anthropic relies on that
                # exact marker when round-tripping.
                thinking_text = block.get("thinking", "")
                signature = block.get("signature")
                if thinking_text:
                    content += f'<thinking signature="{signature}">{thinking_text}</thinking>\n'
            elif block.get("type") == "tool_use":
                # Handle legacy tool_use content blocks
                tool_calls.append(format_openai_tool_call(block))
            else:
                logger.warning(
                    "unsupported_content_block_type", type=block.get("type")
                )

    return content, tool_calls
|
|
445
|
-
|
|
446
|
-
def _create_openai_message(
    self, content: str, tool_calls: list[Any]
) -> OpenAIResponseMessage:
    """Build the assistant message, normalizing empty content.

    OpenAI clients expect ``content`` to be the empty string (not null)
    when tool calls are present, and null (not "") when the message is
    otherwise empty.
    """
    if content:
        final_content: str | None = content
    elif tool_calls:
        final_content = ""
    else:
        final_content = None

    return OpenAIResponseMessage(
        role="assistant",
        content=final_content,
        tool_calls=tool_calls or None,
    )
|
|
463
|
-
|
|
464
|
-
def _create_openai_choice(
    self, message: OpenAIResponseMessage, response: dict[str, Any]
) -> OpenAIChoice:
    """Wrap *message* in a single OpenAI choice with a mapped finish reason."""
    raw_reason = self._convert_stop_reason_to_openai(response.get("stop_reason"))

    # Anything outside OpenAI's literal vocabulary (including None)
    # collapses to "stop".
    if raw_reason not in ("stop", "length", "tool_calls", "content_filter"):
        raw_reason = "stop"
    finish_reason = cast(
        Literal["stop", "length", "tool_calls", "content_filter"], raw_reason
    )

    return OpenAIChoice(
        index=0,
        message=message,
        finish_reason=finish_reason,
        logprobs=None,  # Anthropic doesn't support logprobs
    )
|
|
486
|
-
|
|
487
|
-
def _create_openai_usage(self, response: dict[str, Any]) -> OpenAIUsage:
    """Map Anthropic usage counters onto OpenAI's token-accounting model.

    Missing counters default to 0, so an absent ``usage`` dict yields an
    all-zero usage record rather than an error.
    """
    counters = response.get("usage", {})
    prompt = counters.get("input_tokens", 0)
    completion = counters.get("output_tokens", 0)
    return OpenAIUsage(
        prompt_tokens=prompt,
        completion_tokens=completion,
        total_tokens=prompt + completion,
    )
|
|
496
|
-
|
|
497
|
-
async def adapt_stream(
    self, stream: AsyncIterator[dict[str, Any]]
) -> AsyncIterator[dict[str, Any]]:
    """Convert Anthropic streaming response to OpenAI streaming format.

    Args:
        stream: Anthropic streaming response

    Yields:
        OpenAI format streaming chunks

    Raises:
        ValueError: If the stream format is invalid or unsupported
    """
    # Create stream processor with dict output format
    processor = OpenAIStreamProcessor(
        enable_usage=True,
        enable_tool_calls=True,
        output_format="dict",  # Output dict objects instead of SSE strings
    )

    try:
        # Process the stream - now yields dict objects directly
        async for chunk in processor.process_stream(stream):
            yield chunk  # type: ignore[misc] # chunk is guaranteed to be dict when output_format="dict"
    # Deliberately broad: any failure mid-stream is logged with traceback
    # and re-raised as ValueError so callers see one exception type.
    except Exception as e:
        logger.error(
            "streaming_conversion_failed",
            error=str(e),
            error_type=type(e).__name__,
            operation="adapt_stream",
            exc_info=True,
        )
        raise ValueError(f"Error processing streaming response: {e}") from e
|
|
531
|
-
|
|
532
|
-
def _convert_messages_to_anthropic(
|
|
533
|
-
self, openai_messages: list[Any]
|
|
534
|
-
) -> tuple[list[dict[str, Any]], str | None]:
|
|
535
|
-
"""Convert OpenAI messages to Anthropic format."""
|
|
536
|
-
messages = []
|
|
537
|
-
system_prompt = None
|
|
538
|
-
|
|
539
|
-
for msg in openai_messages:
|
|
540
|
-
if msg.role in ["system", "developer"]:
|
|
541
|
-
# System and developer messages become system prompt
|
|
542
|
-
if isinstance(msg.content, str):
|
|
543
|
-
if system_prompt:
|
|
544
|
-
system_prompt += "\n" + msg.content
|
|
545
|
-
else:
|
|
546
|
-
system_prompt = msg.content
|
|
547
|
-
elif isinstance(msg.content, list):
|
|
548
|
-
# Extract text from content blocks
|
|
549
|
-
text_parts: list[str] = []
|
|
550
|
-
for block in msg.content:
|
|
551
|
-
if (
|
|
552
|
-
hasattr(block, "type")
|
|
553
|
-
and block.type == "text"
|
|
554
|
-
and hasattr(block, "text")
|
|
555
|
-
and block.text
|
|
556
|
-
):
|
|
557
|
-
text_parts.append(block.text)
|
|
558
|
-
text_content = " ".join(text_parts)
|
|
559
|
-
if system_prompt:
|
|
560
|
-
system_prompt += "\n" + text_content
|
|
561
|
-
else:
|
|
562
|
-
system_prompt = text_content
|
|
563
|
-
|
|
564
|
-
elif msg.role in ["user", "assistant"]:
|
|
565
|
-
# Convert user/assistant messages
|
|
566
|
-
anthropic_msg = {
|
|
567
|
-
"role": msg.role,
|
|
568
|
-
"content": self._convert_content_to_anthropic(msg.content),
|
|
569
|
-
}
|
|
570
|
-
|
|
571
|
-
# Add tool calls if present
|
|
572
|
-
if hasattr(msg, "tool_calls") and msg.tool_calls:
|
|
573
|
-
# Ensure content is a list
|
|
574
|
-
if isinstance(anthropic_msg["content"], str):
|
|
575
|
-
anthropic_msg["content"] = [
|
|
576
|
-
{"type": "text", "text": anthropic_msg["content"]}
|
|
577
|
-
]
|
|
578
|
-
if not isinstance(anthropic_msg["content"], list):
|
|
579
|
-
anthropic_msg["content"] = []
|
|
580
|
-
|
|
581
|
-
# Content is now guaranteed to be a list
|
|
582
|
-
content_list = anthropic_msg["content"]
|
|
583
|
-
for tool_call in msg.tool_calls:
|
|
584
|
-
content_list.append(
|
|
585
|
-
self._convert_tool_call_to_anthropic(tool_call)
|
|
586
|
-
)
|
|
587
|
-
|
|
588
|
-
messages.append(anthropic_msg)
|
|
589
|
-
|
|
590
|
-
elif msg.role == "tool":
|
|
591
|
-
# Tool result messages
|
|
592
|
-
if messages and messages[-1]["role"] == "user":
|
|
593
|
-
# Add to previous user message
|
|
594
|
-
if isinstance(messages[-1]["content"], str):
|
|
595
|
-
messages[-1]["content"] = [
|
|
596
|
-
{"type": "text", "text": messages[-1]["content"]}
|
|
597
|
-
]
|
|
598
|
-
|
|
599
|
-
tool_result = {
|
|
600
|
-
"type": "tool_result",
|
|
601
|
-
"tool_use_id": getattr(msg, "tool_call_id", "unknown")
|
|
602
|
-
or "unknown",
|
|
603
|
-
"content": msg.content or "",
|
|
604
|
-
}
|
|
605
|
-
if isinstance(messages[-1]["content"], list):
|
|
606
|
-
messages[-1]["content"].append(tool_result)
|
|
607
|
-
else:
|
|
608
|
-
# Create new user message with tool result
|
|
609
|
-
tool_result = {
|
|
610
|
-
"type": "tool_result",
|
|
611
|
-
"tool_use_id": getattr(msg, "tool_call_id", "unknown")
|
|
612
|
-
or "unknown",
|
|
613
|
-
"content": msg.content or "",
|
|
614
|
-
}
|
|
615
|
-
messages.append(
|
|
616
|
-
{
|
|
617
|
-
"role": "user",
|
|
618
|
-
"content": [tool_result],
|
|
619
|
-
}
|
|
620
|
-
)
|
|
621
|
-
|
|
622
|
-
return messages, system_prompt
|
|
623
|
-
|
|
624
|
-
def _convert_content_to_anthropic(
    self, content: str | list[Any] | None
) -> str | list[dict[str, Any]]:
    """Convert OpenAI content to Anthropic format.

    Strings pass through unchanged unless they embed ``<thinking
    signature="...">`` markers (produced by _convert_content_blocks), in
    which case they are split back into thinking/text blocks. List
    content is converted block-by-block; unrecognized blocks are dropped.
    """
    if content is None:
        return ""

    if isinstance(content, str):
        # Check if the string contains thinking blocks
        thinking_pattern = r'<thinking signature="([^"]*)">(.*?)</thinking>'
        matches = re.findall(thinking_pattern, content, re.DOTALL)

        if matches:
            # Convert string with thinking blocks to list format
            anthropic_content: list[dict[str, Any]] = []
            last_end = 0

            for match in re.finditer(thinking_pattern, content, re.DOTALL):
                # Add any text before the thinking block
                if match.start() > last_end:
                    text_before = content[last_end : match.start()].strip()
                    if text_before:
                        anthropic_content.append(
                            {"type": "text", "text": text_before}
                        )

                # Add the thinking block
                signature = match.group(1)
                thinking_text = match.group(2)
                thinking_block: dict[str, Any] = {
                    "type": "thinking",
                    "thinking": thinking_text,  # Changed from "text" to "thinking"
                }
                # "None" is the serialized form of a missing signature
                # (see _convert_content_blocks), so it is not restored.
                if signature and signature != "None":
                    thinking_block["signature"] = signature
                anthropic_content.append(thinking_block)

                last_end = match.end()

            # Add any remaining text after the last thinking block
            if last_end < len(content):
                remaining_text = content[last_end:].strip()
                if remaining_text:
                    anthropic_content.append(
                        {"type": "text", "text": remaining_text}
                    )

            return anthropic_content
        else:
            return content

    # content must be a list at this point
    anthropic_content = []
    for block in content:
        # Handle both Pydantic objects and dicts.
        # NOTE(review): the two branches below duplicate the image-url
        # handling; kept as-is to avoid behavior drift.
        if hasattr(block, "type"):
            # This is a Pydantic object
            block_type = getattr(block, "type", None)
            if (
                block_type == "text"
                and hasattr(block, "text")
                and block.text is not None
            ):
                anthropic_content.append(
                    {
                        "type": "text",
                        "text": block.text,
                    }
                )
            elif (
                block_type == "image_url"
                and hasattr(block, "image_url")
                and block.image_url is not None
            ):
                # Get URL from image_url
                if hasattr(block.image_url, "url"):
                    url = block.image_url.url
                elif isinstance(block.image_url, dict):
                    url = block.image_url.get("url", "")
                else:
                    url = ""

                if url.startswith("data:"):
                    # Base64 encoded image; a malformed data URL raises
                    # ValueError from the unpacking and is only logged.
                    try:
                        media_type, data = url.split(";base64,")
                        media_type = media_type.split(":")[1]
                        anthropic_content.append(
                            {
                                "type": "image",
                                "source": {
                                    "type": "base64",
                                    "media_type": media_type,
                                    "data": data,
                                },
                            }
                        )
                    except ValueError:
                        logger.warning(
                            "invalid_base64_image_url",
                            url=url[:100] + "..." if len(url) > 100 else url,
                            operation="convert_content_to_anthropic",
                        )
                else:
                    # URL-based image (not directly supported by Anthropic)
                    anthropic_content.append(
                        {
                            "type": "text",
                            "text": f"[Image: {url}]",
                        }
                    )
        elif isinstance(block, dict):
            if block.get("type") == "text":
                anthropic_content.append(
                    {
                        "type": "text",
                        "text": block.get("text", ""),
                    }
                )
            elif block.get("type") == "image_url":
                # Convert image URL to Anthropic format
                image_url = block.get("image_url", {})
                url = image_url.get("url", "")

                if url.startswith("data:"):
                    # Base64 encoded image
                    try:
                        media_type, data = url.split(";base64,")
                        media_type = media_type.split(":")[1]
                        anthropic_content.append(
                            {
                                "type": "image",
                                "source": {
                                    "type": "base64",
                                    "media_type": media_type,
                                    "data": data,
                                },
                            }
                        )
                    except ValueError:
                        logger.warning(
                            "invalid_base64_image_url",
                            url=url[:100] + "..." if len(url) > 100 else url,
                            operation="convert_content_to_anthropic",
                        )
                else:
                    # URL-based image (not directly supported by Anthropic)
                    anthropic_content.append(
                        {
                            "type": "text",
                            "text": f"[Image: {url}]",
                        }
                    )

    # An all-unconvertible list degrades to empty string content.
    return anthropic_content if anthropic_content else ""
|
|
779
|
-
|
|
780
|
-
def _convert_tools_to_anthropic(
|
|
781
|
-
self, tools: list[dict[str, Any]] | list[Any]
|
|
782
|
-
) -> list[dict[str, Any]]:
|
|
783
|
-
"""Convert OpenAI tools to Anthropic format."""
|
|
784
|
-
anthropic_tools = []
|
|
785
|
-
|
|
786
|
-
for tool in tools:
|
|
787
|
-
# Handle both dict and Pydantic model cases
|
|
788
|
-
if isinstance(tool, dict):
|
|
789
|
-
if tool.get("type") == "function":
|
|
790
|
-
func = tool.get("function", {})
|
|
791
|
-
anthropic_tools.append(
|
|
792
|
-
{
|
|
793
|
-
"name": func.get("name", ""),
|
|
794
|
-
"description": func.get("description", ""),
|
|
795
|
-
"input_schema": func.get("parameters", {}),
|
|
796
|
-
}
|
|
797
|
-
)
|
|
798
|
-
elif hasattr(tool, "type") and tool.type == "function":
|
|
799
|
-
# Handle Pydantic OpenAITool model
|
|
800
|
-
anthropic_tools.append(
|
|
801
|
-
{
|
|
802
|
-
"name": tool.function.name,
|
|
803
|
-
"description": tool.function.description or "",
|
|
804
|
-
"input_schema": tool.function.parameters,
|
|
805
|
-
}
|
|
806
|
-
)
|
|
807
|
-
|
|
808
|
-
return anthropic_tools
|
|
809
|
-
|
|
810
|
-
def _convert_functions_to_anthropic(
|
|
811
|
-
self, functions: list[dict[str, Any]]
|
|
812
|
-
) -> list[dict[str, Any]]:
|
|
813
|
-
"""Convert OpenAI functions to Anthropic tools format."""
|
|
814
|
-
anthropic_tools = []
|
|
815
|
-
|
|
816
|
-
for func in functions:
|
|
817
|
-
anthropic_tools.append(
|
|
818
|
-
{
|
|
819
|
-
"name": func.get("name", ""),
|
|
820
|
-
"description": func.get("description", ""),
|
|
821
|
-
"input_schema": func.get("parameters", {}),
|
|
822
|
-
}
|
|
823
|
-
)
|
|
824
|
-
|
|
825
|
-
return anthropic_tools
|
|
826
|
-
|
|
827
|
-
def _convert_tool_choice_to_anthropic(
|
|
828
|
-
self, tool_choice: str | dict[str, Any]
|
|
829
|
-
) -> dict[str, Any]:
|
|
830
|
-
"""Convert OpenAI tool_choice to Anthropic format."""
|
|
831
|
-
if isinstance(tool_choice, str):
|
|
832
|
-
mapping = {
|
|
833
|
-
"none": {"type": "none"},
|
|
834
|
-
"auto": {"type": "auto"},
|
|
835
|
-
"required": {"type": "any"},
|
|
836
|
-
}
|
|
837
|
-
return mapping.get(tool_choice, {"type": "auto"})
|
|
838
|
-
|
|
839
|
-
elif isinstance(tool_choice, dict) and tool_choice.get("type") == "function":
|
|
840
|
-
func = tool_choice.get("function", {})
|
|
841
|
-
return {
|
|
842
|
-
"type": "tool",
|
|
843
|
-
"name": func.get("name", ""),
|
|
844
|
-
}
|
|
845
|
-
|
|
846
|
-
return {"type": "auto"}
|
|
847
|
-
|
|
848
|
-
def _convert_function_call_to_anthropic(
|
|
849
|
-
self, function_call: str | dict[str, Any]
|
|
850
|
-
) -> dict[str, Any]:
|
|
851
|
-
"""Convert OpenAI function_call to Anthropic tool_choice format."""
|
|
852
|
-
if isinstance(function_call, str):
|
|
853
|
-
if function_call == "none":
|
|
854
|
-
return {"type": "none"}
|
|
855
|
-
elif function_call == "auto":
|
|
856
|
-
return {"type": "auto"}
|
|
857
|
-
|
|
858
|
-
elif isinstance(function_call, dict):
|
|
859
|
-
return {
|
|
860
|
-
"type": "tool",
|
|
861
|
-
"name": function_call.get("name", ""),
|
|
862
|
-
}
|
|
863
|
-
|
|
864
|
-
return {"type": "auto"}
|
|
865
|
-
|
|
866
|
-
def _convert_tool_call_to_anthropic(
|
|
867
|
-
self, tool_call: dict[str, Any]
|
|
868
|
-
) -> dict[str, Any]:
|
|
869
|
-
"""Convert OpenAI tool call to Anthropic format."""
|
|
870
|
-
func = tool_call.get("function", {})
|
|
871
|
-
|
|
872
|
-
# Parse arguments string to dict for Anthropic format
|
|
873
|
-
arguments_str = func.get("arguments", "{}")
|
|
874
|
-
try:
|
|
875
|
-
if isinstance(arguments_str, str):
|
|
876
|
-
input_dict = json.loads(arguments_str)
|
|
877
|
-
else:
|
|
878
|
-
input_dict = arguments_str # Already a dict
|
|
879
|
-
except json.JSONDecodeError:
|
|
880
|
-
logger.warning(
|
|
881
|
-
"tool_arguments_parse_failed",
|
|
882
|
-
arguments=arguments_str[:200] + "..."
|
|
883
|
-
if len(str(arguments_str)) > 200
|
|
884
|
-
else str(arguments_str),
|
|
885
|
-
operation="convert_tool_call_to_anthropic",
|
|
886
|
-
)
|
|
887
|
-
input_dict = {}
|
|
888
|
-
|
|
889
|
-
return {
|
|
890
|
-
"type": "tool_use",
|
|
891
|
-
"id": tool_call.get("id", ""),
|
|
892
|
-
"name": func.get("name", ""),
|
|
893
|
-
"input": input_dict,
|
|
894
|
-
}
|
|
895
|
-
|
|
896
|
-
def _convert_stop_reason_to_openai(self, stop_reason: str | None) -> str | None:
|
|
897
|
-
"""Convert Anthropic stop reason to OpenAI format."""
|
|
898
|
-
if stop_reason is None:
|
|
899
|
-
return None
|
|
900
|
-
|
|
901
|
-
mapping = {
|
|
902
|
-
"end_turn": "stop",
|
|
903
|
-
"max_tokens": "length",
|
|
904
|
-
"stop_sequence": "stop",
|
|
905
|
-
"tool_use": "tool_calls",
|
|
906
|
-
"pause_turn": "stop",
|
|
907
|
-
"refusal": "content_filter",
|
|
908
|
-
}
|
|
909
|
-
|
|
910
|
-
return mapping.get(stop_reason, "stop")
|
|
911
|
-
|
|
912
|
-
def adapt_error(self, error_body: dict[str, Any]) -> dict[str, Any]:
|
|
913
|
-
"""Convert Anthropic error format to OpenAI error format.
|
|
914
|
-
|
|
915
|
-
Args:
|
|
916
|
-
error_body: Anthropic error response
|
|
917
|
-
|
|
918
|
-
Returns:
|
|
919
|
-
OpenAI-formatted error response
|
|
920
|
-
"""
|
|
921
|
-
# Extract error details from Anthropic format
|
|
922
|
-
anthropic_error = error_body.get("error", {})
|
|
923
|
-
error_type = anthropic_error.get("type", "internal_server_error")
|
|
924
|
-
error_message = anthropic_error.get("message", "An error occurred")
|
|
925
|
-
|
|
926
|
-
# Map Anthropic error types to OpenAI error types
|
|
927
|
-
error_type_mapping = {
|
|
928
|
-
"invalid_request_error": "invalid_request_error",
|
|
929
|
-
"authentication_error": "invalid_request_error",
|
|
930
|
-
"permission_error": "invalid_request_error",
|
|
931
|
-
"not_found_error": "invalid_request_error",
|
|
932
|
-
"rate_limit_error": "rate_limit_error",
|
|
933
|
-
"internal_server_error": "internal_server_error",
|
|
934
|
-
"overloaded_error": "server_error",
|
|
935
|
-
}
|
|
936
|
-
|
|
937
|
-
openai_error_type = error_type_mapping.get(error_type, "invalid_request_error")
|
|
938
|
-
|
|
939
|
-
# Return OpenAI-formatted error
|
|
940
|
-
return {
|
|
941
|
-
"error": {
|
|
942
|
-
"message": error_message,
|
|
943
|
-
"type": openai_error_type,
|
|
944
|
-
"code": error_type, # Preserve original error type as code
|
|
945
|
-
}
|
|
946
|
-
}
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
# Public API of this module: the adapter class plus the request/response
# models most callers need for type annotations.
__all__ = [
    "OpenAIAdapter",
    "OpenAIChatCompletionRequest",
    "OpenAIChatCompletionResponse",
]
|