ccproxy-api 0.1.6__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/api/__init__.py +1 -15
- ccproxy/api/app.py +439 -212
- ccproxy/api/bootstrap.py +30 -0
- ccproxy/api/decorators.py +85 -0
- ccproxy/api/dependencies.py +145 -176
- ccproxy/api/format_validation.py +54 -0
- ccproxy/api/middleware/cors.py +6 -3
- ccproxy/api/middleware/errors.py +402 -530
- ccproxy/api/middleware/hooks.py +563 -0
- ccproxy/api/middleware/normalize_headers.py +59 -0
- ccproxy/api/middleware/request_id.py +35 -16
- ccproxy/api/middleware/streaming_hooks.py +292 -0
- ccproxy/api/routes/__init__.py +5 -14
- ccproxy/api/routes/health.py +39 -672
- ccproxy/api/routes/plugins.py +277 -0
- ccproxy/auth/__init__.py +2 -19
- ccproxy/auth/bearer.py +25 -15
- ccproxy/auth/dependencies.py +123 -157
- ccproxy/auth/exceptions.py +0 -12
- ccproxy/auth/manager.py +35 -49
- ccproxy/auth/managers/__init__.py +10 -0
- ccproxy/auth/managers/base.py +523 -0
- ccproxy/auth/managers/base_enhanced.py +63 -0
- ccproxy/auth/managers/token_snapshot.py +77 -0
- ccproxy/auth/models/base.py +65 -0
- ccproxy/auth/models/credentials.py +40 -0
- ccproxy/auth/oauth/__init__.py +4 -18
- ccproxy/auth/oauth/base.py +533 -0
- ccproxy/auth/oauth/cli_errors.py +37 -0
- ccproxy/auth/oauth/flows.py +430 -0
- ccproxy/auth/oauth/protocol.py +366 -0
- ccproxy/auth/oauth/registry.py +408 -0
- ccproxy/auth/oauth/router.py +396 -0
- ccproxy/auth/oauth/routes.py +186 -113
- ccproxy/auth/oauth/session.py +151 -0
- ccproxy/auth/oauth/templates.py +342 -0
- ccproxy/auth/storage/__init__.py +2 -5
- ccproxy/auth/storage/base.py +279 -5
- ccproxy/auth/storage/generic.py +134 -0
- ccproxy/cli/__init__.py +1 -2
- ccproxy/cli/_settings_help.py +351 -0
- ccproxy/cli/commands/auth.py +1519 -793
- ccproxy/cli/commands/config/commands.py +209 -276
- ccproxy/cli/commands/plugins.py +669 -0
- ccproxy/cli/commands/serve.py +75 -810
- ccproxy/cli/commands/status.py +254 -0
- ccproxy/cli/decorators.py +83 -0
- ccproxy/cli/helpers.py +22 -60
- ccproxy/cli/main.py +359 -10
- ccproxy/cli/options/claude_options.py +0 -25
- ccproxy/config/__init__.py +7 -11
- ccproxy/config/core.py +227 -0
- ccproxy/config/env_generator.py +232 -0
- ccproxy/config/runtime.py +67 -0
- ccproxy/config/security.py +36 -3
- ccproxy/config/settings.py +382 -441
- ccproxy/config/toml_generator.py +299 -0
- ccproxy/config/utils.py +452 -0
- ccproxy/core/__init__.py +7 -271
- ccproxy/{_version.py → core/_version.py} +16 -3
- ccproxy/core/async_task_manager.py +516 -0
- ccproxy/core/async_utils.py +47 -14
- ccproxy/core/auth/__init__.py +6 -0
- ccproxy/core/constants.py +16 -50
- ccproxy/core/errors.py +53 -0
- ccproxy/core/id_utils.py +20 -0
- ccproxy/core/interfaces.py +16 -123
- ccproxy/core/logging.py +473 -18
- ccproxy/core/plugins/__init__.py +77 -0
- ccproxy/core/plugins/cli_discovery.py +211 -0
- ccproxy/core/plugins/declaration.py +455 -0
- ccproxy/core/plugins/discovery.py +604 -0
- ccproxy/core/plugins/factories.py +967 -0
- ccproxy/core/plugins/hooks/__init__.py +30 -0
- ccproxy/core/plugins/hooks/base.py +58 -0
- ccproxy/core/plugins/hooks/events.py +46 -0
- ccproxy/core/plugins/hooks/implementations/__init__.py +16 -0
- ccproxy/core/plugins/hooks/implementations/formatters/__init__.py +11 -0
- ccproxy/core/plugins/hooks/implementations/formatters/json.py +552 -0
- ccproxy/core/plugins/hooks/implementations/formatters/raw.py +370 -0
- ccproxy/core/plugins/hooks/implementations/http_tracer.py +431 -0
- ccproxy/core/plugins/hooks/layers.py +44 -0
- ccproxy/core/plugins/hooks/manager.py +186 -0
- ccproxy/core/plugins/hooks/registry.py +139 -0
- ccproxy/core/plugins/hooks/thread_manager.py +203 -0
- ccproxy/core/plugins/hooks/types.py +22 -0
- ccproxy/core/plugins/interfaces.py +416 -0
- ccproxy/core/plugins/loader.py +166 -0
- ccproxy/core/plugins/middleware.py +233 -0
- ccproxy/core/plugins/models.py +59 -0
- ccproxy/core/plugins/protocol.py +180 -0
- ccproxy/core/plugins/runtime.py +519 -0
- ccproxy/{observability/context.py → core/request_context.py} +137 -94
- ccproxy/core/status_report.py +211 -0
- ccproxy/core/transformers.py +13 -8
- ccproxy/data/claude_headers_fallback.json +558 -0
- ccproxy/data/codex_headers_fallback.json +121 -0
- ccproxy/http/__init__.py +30 -0
- ccproxy/http/base.py +95 -0
- ccproxy/http/client.py +323 -0
- ccproxy/http/hooks.py +642 -0
- ccproxy/http/pool.py +279 -0
- ccproxy/llms/formatters/__init__.py +7 -0
- ccproxy/llms/formatters/anthropic_to_openai/__init__.py +55 -0
- ccproxy/llms/formatters/anthropic_to_openai/errors.py +65 -0
- ccproxy/llms/formatters/anthropic_to_openai/requests.py +356 -0
- ccproxy/llms/formatters/anthropic_to_openai/responses.py +153 -0
- ccproxy/llms/formatters/anthropic_to_openai/streams.py +1546 -0
- ccproxy/llms/formatters/base.py +140 -0
- ccproxy/llms/formatters/base_model.py +33 -0
- ccproxy/llms/formatters/common/__init__.py +51 -0
- ccproxy/llms/formatters/common/identifiers.py +48 -0
- ccproxy/llms/formatters/common/streams.py +254 -0
- ccproxy/llms/formatters/common/thinking.py +74 -0
- ccproxy/llms/formatters/common/usage.py +135 -0
- ccproxy/llms/formatters/constants.py +55 -0
- ccproxy/llms/formatters/context.py +116 -0
- ccproxy/llms/formatters/mapping.py +33 -0
- ccproxy/llms/formatters/openai_to_anthropic/__init__.py +55 -0
- ccproxy/llms/formatters/openai_to_anthropic/_helpers.py +141 -0
- ccproxy/llms/formatters/openai_to_anthropic/errors.py +53 -0
- ccproxy/llms/formatters/openai_to_anthropic/requests.py +674 -0
- ccproxy/llms/formatters/openai_to_anthropic/responses.py +285 -0
- ccproxy/llms/formatters/openai_to_anthropic/streams.py +530 -0
- ccproxy/llms/formatters/openai_to_openai/__init__.py +53 -0
- ccproxy/llms/formatters/openai_to_openai/_helpers.py +325 -0
- ccproxy/llms/formatters/openai_to_openai/errors.py +6 -0
- ccproxy/llms/formatters/openai_to_openai/requests.py +388 -0
- ccproxy/llms/formatters/openai_to_openai/responses.py +594 -0
- ccproxy/llms/formatters/openai_to_openai/streams.py +1832 -0
- ccproxy/llms/formatters/utils.py +306 -0
- ccproxy/llms/models/__init__.py +9 -0
- ccproxy/llms/models/anthropic.py +619 -0
- ccproxy/llms/models/openai.py +844 -0
- ccproxy/llms/streaming/__init__.py +26 -0
- ccproxy/llms/streaming/accumulators.py +1074 -0
- ccproxy/llms/streaming/formatters.py +251 -0
- ccproxy/{adapters/openai/streaming.py → llms/streaming/processors.py} +193 -240
- ccproxy/models/__init__.py +8 -159
- ccproxy/models/detection.py +92 -193
- ccproxy/models/provider.py +75 -0
- ccproxy/plugins/access_log/README.md +32 -0
- ccproxy/plugins/access_log/__init__.py +20 -0
- ccproxy/plugins/access_log/config.py +33 -0
- ccproxy/plugins/access_log/formatter.py +126 -0
- ccproxy/plugins/access_log/hook.py +763 -0
- ccproxy/plugins/access_log/logger.py +254 -0
- ccproxy/plugins/access_log/plugin.py +137 -0
- ccproxy/plugins/access_log/writer.py +109 -0
- ccproxy/plugins/analytics/README.md +24 -0
- ccproxy/plugins/analytics/__init__.py +1 -0
- ccproxy/plugins/analytics/config.py +5 -0
- ccproxy/plugins/analytics/ingest.py +85 -0
- ccproxy/plugins/analytics/models.py +97 -0
- ccproxy/plugins/analytics/plugin.py +121 -0
- ccproxy/plugins/analytics/routes.py +163 -0
- ccproxy/plugins/analytics/service.py +284 -0
- ccproxy/plugins/claude_api/README.md +29 -0
- ccproxy/plugins/claude_api/__init__.py +10 -0
- ccproxy/plugins/claude_api/adapter.py +829 -0
- ccproxy/plugins/claude_api/config.py +52 -0
- ccproxy/plugins/claude_api/detection_service.py +461 -0
- ccproxy/plugins/claude_api/health.py +175 -0
- ccproxy/plugins/claude_api/hooks.py +284 -0
- ccproxy/plugins/claude_api/models.py +256 -0
- ccproxy/plugins/claude_api/plugin.py +298 -0
- ccproxy/plugins/claude_api/routes.py +118 -0
- ccproxy/plugins/claude_api/streaming_metrics.py +68 -0
- ccproxy/plugins/claude_api/tasks.py +84 -0
- ccproxy/plugins/claude_sdk/README.md +35 -0
- ccproxy/plugins/claude_sdk/__init__.py +80 -0
- ccproxy/plugins/claude_sdk/adapter.py +749 -0
- ccproxy/plugins/claude_sdk/auth.py +57 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/client.py +63 -39
- ccproxy/plugins/claude_sdk/config.py +210 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/converter.py +6 -6
- ccproxy/plugins/claude_sdk/detection_service.py +163 -0
- ccproxy/{services/claude_sdk_service.py → plugins/claude_sdk/handler.py} +123 -304
- ccproxy/plugins/claude_sdk/health.py +113 -0
- ccproxy/plugins/claude_sdk/hooks.py +115 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/manager.py +42 -32
- ccproxy/{claude_sdk → plugins/claude_sdk}/message_queue.py +8 -8
- ccproxy/{models/claude_sdk.py → plugins/claude_sdk/models.py} +64 -16
- ccproxy/plugins/claude_sdk/options.py +154 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/parser.py +23 -5
- ccproxy/plugins/claude_sdk/plugin.py +269 -0
- ccproxy/plugins/claude_sdk/routes.py +104 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/session_client.py +124 -12
- ccproxy/plugins/claude_sdk/session_pool.py +700 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_handle.py +48 -43
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_worker.py +22 -18
- ccproxy/{claude_sdk → plugins/claude_sdk}/streaming.py +50 -16
- ccproxy/plugins/claude_sdk/tasks.py +97 -0
- ccproxy/plugins/claude_shared/README.md +18 -0
- ccproxy/plugins/claude_shared/__init__.py +12 -0
- ccproxy/plugins/claude_shared/model_defaults.py +171 -0
- ccproxy/plugins/codex/README.md +35 -0
- ccproxy/plugins/codex/__init__.py +6 -0
- ccproxy/plugins/codex/adapter.py +635 -0
- ccproxy/{config/codex.py → plugins/codex/config.py} +78 -12
- ccproxy/plugins/codex/detection_service.py +544 -0
- ccproxy/plugins/codex/health.py +162 -0
- ccproxy/plugins/codex/hooks.py +263 -0
- ccproxy/plugins/codex/model_defaults.py +39 -0
- ccproxy/plugins/codex/models.py +263 -0
- ccproxy/plugins/codex/plugin.py +275 -0
- ccproxy/plugins/codex/routes.py +129 -0
- ccproxy/plugins/codex/streaming_metrics.py +324 -0
- ccproxy/plugins/codex/tasks.py +106 -0
- ccproxy/plugins/codex/utils/__init__.py +1 -0
- ccproxy/plugins/codex/utils/sse_parser.py +106 -0
- ccproxy/plugins/command_replay/README.md +34 -0
- ccproxy/plugins/command_replay/__init__.py +17 -0
- ccproxy/plugins/command_replay/config.py +133 -0
- ccproxy/plugins/command_replay/formatter.py +432 -0
- ccproxy/plugins/command_replay/hook.py +294 -0
- ccproxy/plugins/command_replay/plugin.py +161 -0
- ccproxy/plugins/copilot/README.md +39 -0
- ccproxy/plugins/copilot/__init__.py +11 -0
- ccproxy/plugins/copilot/adapter.py +465 -0
- ccproxy/plugins/copilot/config.py +155 -0
- ccproxy/plugins/copilot/data/copilot_fallback.json +41 -0
- ccproxy/plugins/copilot/detection_service.py +255 -0
- ccproxy/plugins/copilot/manager.py +275 -0
- ccproxy/plugins/copilot/model_defaults.py +284 -0
- ccproxy/plugins/copilot/models.py +148 -0
- ccproxy/plugins/copilot/oauth/__init__.py +16 -0
- ccproxy/plugins/copilot/oauth/client.py +494 -0
- ccproxy/plugins/copilot/oauth/models.py +385 -0
- ccproxy/plugins/copilot/oauth/provider.py +602 -0
- ccproxy/plugins/copilot/oauth/storage.py +170 -0
- ccproxy/plugins/copilot/plugin.py +360 -0
- ccproxy/plugins/copilot/routes.py +294 -0
- ccproxy/plugins/credential_balancer/README.md +124 -0
- ccproxy/plugins/credential_balancer/__init__.py +6 -0
- ccproxy/plugins/credential_balancer/config.py +270 -0
- ccproxy/plugins/credential_balancer/factory.py +415 -0
- ccproxy/plugins/credential_balancer/hook.py +51 -0
- ccproxy/plugins/credential_balancer/manager.py +587 -0
- ccproxy/plugins/credential_balancer/plugin.py +146 -0
- ccproxy/plugins/dashboard/README.md +25 -0
- ccproxy/plugins/dashboard/__init__.py +1 -0
- ccproxy/plugins/dashboard/config.py +8 -0
- ccproxy/plugins/dashboard/plugin.py +71 -0
- ccproxy/plugins/dashboard/routes.py +67 -0
- ccproxy/plugins/docker/README.md +32 -0
- ccproxy/{docker → plugins/docker}/__init__.py +3 -0
- ccproxy/{docker → plugins/docker}/adapter.py +108 -10
- ccproxy/plugins/docker/config.py +82 -0
- ccproxy/{docker → plugins/docker}/docker_path.py +4 -3
- ccproxy/{docker → plugins/docker}/middleware.py +2 -2
- ccproxy/plugins/docker/plugin.py +198 -0
- ccproxy/{docker → plugins/docker}/stream_process.py +3 -3
- ccproxy/plugins/duckdb_storage/README.md +26 -0
- ccproxy/plugins/duckdb_storage/__init__.py +1 -0
- ccproxy/plugins/duckdb_storage/config.py +22 -0
- ccproxy/plugins/duckdb_storage/plugin.py +128 -0
- ccproxy/plugins/duckdb_storage/routes.py +51 -0
- ccproxy/plugins/duckdb_storage/storage.py +633 -0
- ccproxy/plugins/max_tokens/README.md +38 -0
- ccproxy/plugins/max_tokens/__init__.py +12 -0
- ccproxy/plugins/max_tokens/adapter.py +235 -0
- ccproxy/plugins/max_tokens/config.py +86 -0
- ccproxy/plugins/max_tokens/models.py +53 -0
- ccproxy/plugins/max_tokens/plugin.py +200 -0
- ccproxy/plugins/max_tokens/service.py +271 -0
- ccproxy/plugins/max_tokens/token_limits.json +54 -0
- ccproxy/plugins/metrics/README.md +35 -0
- ccproxy/plugins/metrics/__init__.py +10 -0
- ccproxy/{observability/metrics.py → plugins/metrics/collector.py} +20 -153
- ccproxy/plugins/metrics/config.py +85 -0
- ccproxy/plugins/metrics/grafana/dashboards/ccproxy-dashboard.json +1720 -0
- ccproxy/plugins/metrics/hook.py +403 -0
- ccproxy/plugins/metrics/plugin.py +268 -0
- ccproxy/{observability → plugins/metrics}/pushgateway.py +57 -59
- ccproxy/plugins/metrics/routes.py +107 -0
- ccproxy/plugins/metrics/tasks.py +117 -0
- ccproxy/plugins/oauth_claude/README.md +35 -0
- ccproxy/plugins/oauth_claude/__init__.py +14 -0
- ccproxy/plugins/oauth_claude/client.py +270 -0
- ccproxy/plugins/oauth_claude/config.py +84 -0
- ccproxy/plugins/oauth_claude/manager.py +482 -0
- ccproxy/plugins/oauth_claude/models.py +266 -0
- ccproxy/plugins/oauth_claude/plugin.py +149 -0
- ccproxy/plugins/oauth_claude/provider.py +571 -0
- ccproxy/plugins/oauth_claude/storage.py +212 -0
- ccproxy/plugins/oauth_codex/README.md +38 -0
- ccproxy/plugins/oauth_codex/__init__.py +14 -0
- ccproxy/plugins/oauth_codex/client.py +224 -0
- ccproxy/plugins/oauth_codex/config.py +95 -0
- ccproxy/plugins/oauth_codex/manager.py +256 -0
- ccproxy/plugins/oauth_codex/models.py +239 -0
- ccproxy/plugins/oauth_codex/plugin.py +146 -0
- ccproxy/plugins/oauth_codex/provider.py +574 -0
- ccproxy/plugins/oauth_codex/storage.py +92 -0
- ccproxy/plugins/permissions/README.md +28 -0
- ccproxy/plugins/permissions/__init__.py +22 -0
- ccproxy/plugins/permissions/config.py +28 -0
- ccproxy/{cli/commands/permission_handler.py → plugins/permissions/handlers/cli.py} +49 -25
- ccproxy/plugins/permissions/handlers/protocol.py +33 -0
- ccproxy/plugins/permissions/handlers/terminal.py +675 -0
- ccproxy/{api/routes → plugins/permissions}/mcp.py +34 -7
- ccproxy/{models/permissions.py → plugins/permissions/models.py} +65 -1
- ccproxy/plugins/permissions/plugin.py +153 -0
- ccproxy/{api/routes/permissions.py → plugins/permissions/routes.py} +20 -16
- ccproxy/{api/services/permission_service.py → plugins/permissions/service.py} +65 -11
- ccproxy/{api → plugins/permissions}/ui/permission_handler_protocol.py +1 -1
- ccproxy/{api → plugins/permissions}/ui/terminal_permission_handler.py +66 -10
- ccproxy/plugins/pricing/README.md +34 -0
- ccproxy/plugins/pricing/__init__.py +6 -0
- ccproxy/{pricing → plugins/pricing}/cache.py +7 -6
- ccproxy/{config/pricing.py → plugins/pricing/config.py} +32 -6
- ccproxy/plugins/pricing/exceptions.py +35 -0
- ccproxy/plugins/pricing/loader.py +440 -0
- ccproxy/{pricing → plugins/pricing}/models.py +13 -23
- ccproxy/plugins/pricing/plugin.py +169 -0
- ccproxy/plugins/pricing/service.py +191 -0
- ccproxy/plugins/pricing/tasks.py +300 -0
- ccproxy/{pricing → plugins/pricing}/updater.py +86 -72
- ccproxy/plugins/pricing/utils.py +99 -0
- ccproxy/plugins/request_tracer/README.md +40 -0
- ccproxy/plugins/request_tracer/__init__.py +7 -0
- ccproxy/plugins/request_tracer/config.py +120 -0
- ccproxy/plugins/request_tracer/hook.py +415 -0
- ccproxy/plugins/request_tracer/plugin.py +255 -0
- ccproxy/scheduler/__init__.py +2 -14
- ccproxy/scheduler/core.py +26 -41
- ccproxy/scheduler/manager.py +63 -107
- ccproxy/scheduler/registry.py +6 -32
- ccproxy/scheduler/tasks.py +346 -314
- ccproxy/services/__init__.py +0 -1
- ccproxy/services/adapters/__init__.py +11 -0
- ccproxy/services/adapters/base.py +123 -0
- ccproxy/services/adapters/chain_composer.py +88 -0
- ccproxy/services/adapters/chain_validation.py +44 -0
- ccproxy/services/adapters/chat_accumulator.py +200 -0
- ccproxy/services/adapters/delta_utils.py +142 -0
- ccproxy/services/adapters/format_adapter.py +136 -0
- ccproxy/services/adapters/format_context.py +11 -0
- ccproxy/services/adapters/format_registry.py +158 -0
- ccproxy/services/adapters/http_adapter.py +1045 -0
- ccproxy/services/adapters/mock_adapter.py +118 -0
- ccproxy/services/adapters/protocols.py +35 -0
- ccproxy/services/adapters/simple_converters.py +571 -0
- ccproxy/services/auth_registry.py +180 -0
- ccproxy/services/cache/__init__.py +6 -0
- ccproxy/services/cache/response_cache.py +261 -0
- ccproxy/services/cli_detection.py +437 -0
- ccproxy/services/config/__init__.py +6 -0
- ccproxy/services/config/proxy_configuration.py +111 -0
- ccproxy/services/container.py +256 -0
- ccproxy/services/factories.py +380 -0
- ccproxy/services/handler_config.py +76 -0
- ccproxy/services/interfaces.py +298 -0
- ccproxy/services/mocking/__init__.py +6 -0
- ccproxy/services/mocking/mock_handler.py +291 -0
- ccproxy/services/tracing/__init__.py +7 -0
- ccproxy/services/tracing/interfaces.py +61 -0
- ccproxy/services/tracing/null_tracer.py +57 -0
- ccproxy/streaming/__init__.py +23 -0
- ccproxy/streaming/buffer.py +1056 -0
- ccproxy/streaming/deferred.py +897 -0
- ccproxy/streaming/handler.py +117 -0
- ccproxy/streaming/interfaces.py +77 -0
- ccproxy/streaming/simple_adapter.py +39 -0
- ccproxy/streaming/sse.py +109 -0
- ccproxy/streaming/sse_parser.py +127 -0
- ccproxy/templates/__init__.py +6 -0
- ccproxy/templates/plugin_scaffold.py +695 -0
- ccproxy/testing/endpoints/__init__.py +33 -0
- ccproxy/testing/endpoints/cli.py +215 -0
- ccproxy/testing/endpoints/config.py +874 -0
- ccproxy/testing/endpoints/console.py +57 -0
- ccproxy/testing/endpoints/models.py +100 -0
- ccproxy/testing/endpoints/runner.py +1903 -0
- ccproxy/testing/endpoints/tools.py +308 -0
- ccproxy/testing/mock_responses.py +70 -1
- ccproxy/testing/response_handlers.py +20 -0
- ccproxy/utils/__init__.py +0 -6
- ccproxy/utils/binary_resolver.py +476 -0
- ccproxy/utils/caching.py +327 -0
- ccproxy/utils/cli_logging.py +101 -0
- ccproxy/utils/command_line.py +251 -0
- ccproxy/utils/headers.py +228 -0
- ccproxy/utils/model_mapper.py +120 -0
- ccproxy/utils/startup_helpers.py +95 -342
- ccproxy/utils/version_checker.py +279 -6
- ccproxy_api-0.2.0.dist-info/METADATA +212 -0
- ccproxy_api-0.2.0.dist-info/RECORD +417 -0
- {ccproxy_api-0.1.6.dist-info → ccproxy_api-0.2.0.dist-info}/WHEEL +1 -1
- ccproxy_api-0.2.0.dist-info/entry_points.txt +24 -0
- ccproxy/__init__.py +0 -4
- ccproxy/adapters/__init__.py +0 -11
- ccproxy/adapters/base.py +0 -80
- ccproxy/adapters/codex/__init__.py +0 -11
- ccproxy/adapters/openai/__init__.py +0 -42
- ccproxy/adapters/openai/adapter.py +0 -953
- ccproxy/adapters/openai/models.py +0 -412
- ccproxy/adapters/openai/response_adapter.py +0 -355
- ccproxy/adapters/openai/response_models.py +0 -178
- ccproxy/api/middleware/headers.py +0 -49
- ccproxy/api/middleware/logging.py +0 -180
- ccproxy/api/middleware/request_content_logging.py +0 -297
- ccproxy/api/middleware/server_header.py +0 -58
- ccproxy/api/responses.py +0 -89
- ccproxy/api/routes/claude.py +0 -371
- ccproxy/api/routes/codex.py +0 -1231
- ccproxy/api/routes/metrics.py +0 -1029
- ccproxy/api/routes/proxy.py +0 -211
- ccproxy/api/services/__init__.py +0 -6
- ccproxy/auth/conditional.py +0 -84
- ccproxy/auth/credentials_adapter.py +0 -93
- ccproxy/auth/models.py +0 -118
- ccproxy/auth/oauth/models.py +0 -48
- ccproxy/auth/openai/__init__.py +0 -13
- ccproxy/auth/openai/credentials.py +0 -166
- ccproxy/auth/openai/oauth_client.py +0 -334
- ccproxy/auth/openai/storage.py +0 -184
- ccproxy/auth/storage/json_file.py +0 -158
- ccproxy/auth/storage/keyring.py +0 -189
- ccproxy/claude_sdk/__init__.py +0 -18
- ccproxy/claude_sdk/options.py +0 -194
- ccproxy/claude_sdk/session_pool.py +0 -550
- ccproxy/cli/docker/__init__.py +0 -34
- ccproxy/cli/docker/adapter_factory.py +0 -157
- ccproxy/cli/docker/params.py +0 -274
- ccproxy/config/auth.py +0 -153
- ccproxy/config/claude.py +0 -348
- ccproxy/config/cors.py +0 -79
- ccproxy/config/discovery.py +0 -95
- ccproxy/config/docker_settings.py +0 -264
- ccproxy/config/observability.py +0 -158
- ccproxy/config/reverse_proxy.py +0 -31
- ccproxy/config/scheduler.py +0 -108
- ccproxy/config/server.py +0 -86
- ccproxy/config/validators.py +0 -231
- ccproxy/core/codex_transformers.py +0 -389
- ccproxy/core/http.py +0 -328
- ccproxy/core/http_transformers.py +0 -812
- ccproxy/core/proxy.py +0 -143
- ccproxy/core/validators.py +0 -288
- ccproxy/models/errors.py +0 -42
- ccproxy/models/messages.py +0 -269
- ccproxy/models/requests.py +0 -107
- ccproxy/models/responses.py +0 -270
- ccproxy/models/types.py +0 -102
- ccproxy/observability/__init__.py +0 -51
- ccproxy/observability/access_logger.py +0 -457
- ccproxy/observability/sse_events.py +0 -303
- ccproxy/observability/stats_printer.py +0 -753
- ccproxy/observability/storage/__init__.py +0 -1
- ccproxy/observability/storage/duckdb_simple.py +0 -677
- ccproxy/observability/storage/models.py +0 -70
- ccproxy/observability/streaming_response.py +0 -107
- ccproxy/pricing/__init__.py +0 -19
- ccproxy/pricing/loader.py +0 -251
- ccproxy/services/claude_detection_service.py +0 -269
- ccproxy/services/codex_detection_service.py +0 -263
- ccproxy/services/credentials/__init__.py +0 -55
- ccproxy/services/credentials/config.py +0 -105
- ccproxy/services/credentials/manager.py +0 -561
- ccproxy/services/credentials/oauth_client.py +0 -481
- ccproxy/services/proxy_service.py +0 -1827
- ccproxy/static/.keep +0 -0
- ccproxy/utils/cost_calculator.py +0 -210
- ccproxy/utils/disconnection_monitor.py +0 -83
- ccproxy/utils/model_mapping.py +0 -199
- ccproxy/utils/models_provider.py +0 -150
- ccproxy/utils/simple_request_logger.py +0 -284
- ccproxy/utils/streaming_metrics.py +0 -199
- ccproxy_api-0.1.6.dist-info/METADATA +0 -615
- ccproxy_api-0.1.6.dist-info/RECORD +0 -189
- ccproxy_api-0.1.6.dist-info/entry_points.txt +0 -4
- /ccproxy/{api/middleware/auth.py → auth/models/__init__.py} +0 -0
- /ccproxy/{claude_sdk → plugins/claude_sdk}/exceptions.py +0 -0
- /ccproxy/{docker → plugins/docker}/models.py +0 -0
- /ccproxy/{docker → plugins/docker}/protocol.py +0 -0
- /ccproxy/{docker → plugins/docker}/validators.py +0 -0
- /ccproxy/{auth/oauth/storage.py → plugins/permissions/handlers/__init__.py} +0 -0
- /ccproxy/{api → plugins/permissions}/ui/__init__.py +0 -0
- {ccproxy_api-0.1.6.dist-info → ccproxy_api-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,674 @@
|
|
|
1
|
+
"""Request conversion entry points for OpenAI→Anthropic adapters."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from ccproxy.core.constants import DEFAULT_MAX_TOKENS
|
|
9
|
+
from ccproxy.core.logging import get_logger
|
|
10
|
+
from ccproxy.llms.models import anthropic as anthropic_models
|
|
11
|
+
from ccproxy.llms.models import openai as openai_models
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
logger = get_logger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _sanitize_tool_results(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
|
|
18
|
+
"""Remove orphaned tool_result blocks that don't have matching tool_use blocks.
|
|
19
|
+
|
|
20
|
+
The Anthropic API requires that each tool_result block must have a corresponding
|
|
21
|
+
tool_use block in the immediately preceding assistant message. This function removes
|
|
22
|
+
tool_result blocks that don't meet this requirement, converting them to text to
|
|
23
|
+
preserve information.
|
|
24
|
+
|
|
25
|
+
Args:
|
|
26
|
+
messages: List of Anthropic format messages
|
|
27
|
+
|
|
28
|
+
Returns:
|
|
29
|
+
Sanitized messages with orphaned tool_results removed or converted to text
|
|
30
|
+
"""
|
|
31
|
+
if not messages:
|
|
32
|
+
return messages
|
|
33
|
+
|
|
34
|
+
sanitized = []
|
|
35
|
+
for i, msg in enumerate(messages):
|
|
36
|
+
if msg.get("role") == "user" and isinstance(msg.get("content"), list):
|
|
37
|
+
# Find tool_use_ids from the immediately preceding assistant message
|
|
38
|
+
valid_tool_use_ids: set[str] = set()
|
|
39
|
+
if i > 0 and messages[i - 1].get("role") == "assistant":
|
|
40
|
+
prev_content = messages[i - 1].get("content", [])
|
|
41
|
+
if isinstance(prev_content, list):
|
|
42
|
+
for block in prev_content:
|
|
43
|
+
if isinstance(block, dict) and block.get("type") == "tool_use":
|
|
44
|
+
tool_id = block.get("id")
|
|
45
|
+
if tool_id:
|
|
46
|
+
valid_tool_use_ids.add(tool_id)
|
|
47
|
+
|
|
48
|
+
# Filter content blocks
|
|
49
|
+
new_content = []
|
|
50
|
+
orphaned_results = []
|
|
51
|
+
for block in msg["content"]:
|
|
52
|
+
if isinstance(block, dict) and block.get("type") == "tool_result":
|
|
53
|
+
tool_use_id = block.get("tool_use_id")
|
|
54
|
+
if tool_use_id in valid_tool_use_ids:
|
|
55
|
+
new_content.append(block)
|
|
56
|
+
else:
|
|
57
|
+
# Track orphaned tool_result for conversion to text
|
|
58
|
+
orphaned_results.append(block)
|
|
59
|
+
logger.warning(
|
|
60
|
+
"orphaned_tool_result_removed",
|
|
61
|
+
tool_use_id=tool_use_id,
|
|
62
|
+
valid_ids=list(valid_tool_use_ids),
|
|
63
|
+
message_index=i,
|
|
64
|
+
category="message_sanitization",
|
|
65
|
+
)
|
|
66
|
+
else:
|
|
67
|
+
new_content.append(block)
|
|
68
|
+
|
|
69
|
+
# Convert orphaned results to text block to preserve information
|
|
70
|
+
if orphaned_results:
|
|
71
|
+
orphan_text = "[Previous tool results from compacted history]\n"
|
|
72
|
+
for orphan in orphaned_results:
|
|
73
|
+
content = orphan.get("content", "")
|
|
74
|
+
if isinstance(content, list):
|
|
75
|
+
text_parts = []
|
|
76
|
+
for c in content:
|
|
77
|
+
if isinstance(c, dict) and c.get("type") == "text":
|
|
78
|
+
text_parts.append(c.get("text", ""))
|
|
79
|
+
content = "\n".join(text_parts)
|
|
80
|
+
# Truncate long content
|
|
81
|
+
content_str = str(content)
|
|
82
|
+
if len(content_str) > 500:
|
|
83
|
+
content_str = content_str[:500] + "..."
|
|
84
|
+
orphan_text += f"- Tool {orphan.get('tool_use_id', 'unknown')}: {content_str}\n"
|
|
85
|
+
|
|
86
|
+
# Add as text block at the beginning
|
|
87
|
+
new_content.insert(0, {"type": "text", "text": orphan_text})
|
|
88
|
+
|
|
89
|
+
# Update message content (only if we have content left)
|
|
90
|
+
if new_content:
|
|
91
|
+
sanitized.append({**msg, "content": new_content})
|
|
92
|
+
# If no content left, skip this message entirely
|
|
93
|
+
else:
|
|
94
|
+
sanitized.append(msg)
|
|
95
|
+
|
|
96
|
+
return sanitized
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
async def convert__openai_chat_to_anthropic_message__request(
|
|
100
|
+
request: openai_models.ChatCompletionRequest,
|
|
101
|
+
) -> anthropic_models.CreateMessageRequest:
|
|
102
|
+
"""Convert OpenAI ChatCompletionRequest to Anthropic CreateMessageRequest using typed models."""
|
|
103
|
+
model = request.model.strip() if request.model else ""
|
|
104
|
+
|
|
105
|
+
# Determine max tokens
|
|
106
|
+
max_tokens = request.max_completion_tokens
|
|
107
|
+
if max_tokens is None:
|
|
108
|
+
# Access deprecated field with warning suppressed for backward compatibility
|
|
109
|
+
import warnings
|
|
110
|
+
|
|
111
|
+
with warnings.catch_warnings():
|
|
112
|
+
warnings.simplefilter("ignore", DeprecationWarning)
|
|
113
|
+
max_tokens = request.max_tokens
|
|
114
|
+
if max_tokens is None:
|
|
115
|
+
max_tokens = DEFAULT_MAX_TOKENS
|
|
116
|
+
|
|
117
|
+
# Extract system message if present
|
|
118
|
+
system_value: str | None = None
|
|
119
|
+
out_messages: list[dict[str, Any]] = []
|
|
120
|
+
|
|
121
|
+
for msg in request.messages or []:
|
|
122
|
+
role = msg.role
|
|
123
|
+
content = msg.content
|
|
124
|
+
tool_calls = getattr(msg, "tool_calls", None)
|
|
125
|
+
|
|
126
|
+
if role == "system":
|
|
127
|
+
if isinstance(content, str):
|
|
128
|
+
system_value = content
|
|
129
|
+
elif isinstance(content, list):
|
|
130
|
+
texts = [
|
|
131
|
+
part.text
|
|
132
|
+
for part in content
|
|
133
|
+
if hasattr(part, "type")
|
|
134
|
+
and part.type == "text"
|
|
135
|
+
and hasattr(part, "text")
|
|
136
|
+
]
|
|
137
|
+
system_value = " ".join([t for t in texts if t]) or None
|
|
138
|
+
elif role == "assistant":
|
|
139
|
+
if tool_calls:
|
|
140
|
+
blocks = []
|
|
141
|
+
if content: # Add text content if present
|
|
142
|
+
blocks.append({"type": "text", "text": str(content)})
|
|
143
|
+
for tc in tool_calls:
|
|
144
|
+
func_info = tc.function
|
|
145
|
+
tool_name = func_info.name if func_info else None
|
|
146
|
+
tool_args = func_info.arguments if func_info else "{}"
|
|
147
|
+
blocks.append(
|
|
148
|
+
{
|
|
149
|
+
"type": "tool_use",
|
|
150
|
+
"id": tc.id,
|
|
151
|
+
"name": str(tool_name) if tool_name is not None else "",
|
|
152
|
+
"input": json.loads(str(tool_args)),
|
|
153
|
+
}
|
|
154
|
+
)
|
|
155
|
+
out_messages.append({"role": "assistant", "content": blocks})
|
|
156
|
+
elif content is not None:
|
|
157
|
+
out_messages.append({"role": "assistant", "content": content})
|
|
158
|
+
|
|
159
|
+
elif role == "tool":
|
|
160
|
+
tool_call_id = getattr(msg, "tool_call_id", None)
|
|
161
|
+
out_messages.append(
|
|
162
|
+
{
|
|
163
|
+
"role": "user", # Anthropic uses 'user' role for tool results
|
|
164
|
+
"content": [
|
|
165
|
+
{
|
|
166
|
+
"type": "tool_result",
|
|
167
|
+
"tool_use_id": tool_call_id,
|
|
168
|
+
"content": str(content),
|
|
169
|
+
}
|
|
170
|
+
],
|
|
171
|
+
}
|
|
172
|
+
)
|
|
173
|
+
elif role == "user":
|
|
174
|
+
if content is None:
|
|
175
|
+
continue
|
|
176
|
+
if isinstance(content, list):
|
|
177
|
+
user_blocks: list[dict[str, Any]] = []
|
|
178
|
+
text_accum: list[str] = []
|
|
179
|
+
for part in content:
|
|
180
|
+
# Handle both dict and Pydantic object inputs
|
|
181
|
+
if isinstance(part, dict):
|
|
182
|
+
ptype = part.get("type")
|
|
183
|
+
if ptype == "text":
|
|
184
|
+
t = part.get("text")
|
|
185
|
+
if isinstance(t, str):
|
|
186
|
+
text_accum.append(t)
|
|
187
|
+
elif ptype == "image_url":
|
|
188
|
+
image_info = part.get("image_url")
|
|
189
|
+
if isinstance(image_info, dict):
|
|
190
|
+
url = image_info.get("url")
|
|
191
|
+
if isinstance(url, str) and url.startswith("data:"):
|
|
192
|
+
try:
|
|
193
|
+
header, b64data = url.split(",", 1)
|
|
194
|
+
mediatype = header.split(";")[0].split(":", 1)[
|
|
195
|
+
1
|
|
196
|
+
]
|
|
197
|
+
user_blocks.append(
|
|
198
|
+
{
|
|
199
|
+
"type": "image",
|
|
200
|
+
"source": {
|
|
201
|
+
"type": "base64",
|
|
202
|
+
"media_type": str(mediatype),
|
|
203
|
+
"data": str(b64data),
|
|
204
|
+
},
|
|
205
|
+
}
|
|
206
|
+
)
|
|
207
|
+
except Exception:
|
|
208
|
+
pass
|
|
209
|
+
elif hasattr(part, "type"):
|
|
210
|
+
# Pydantic object case
|
|
211
|
+
ptype = part.type
|
|
212
|
+
if ptype == "text" and hasattr(part, "text"):
|
|
213
|
+
t = part.text
|
|
214
|
+
if isinstance(t, str):
|
|
215
|
+
text_accum.append(t)
|
|
216
|
+
elif ptype == "image_url" and hasattr(part, "image_url"):
|
|
217
|
+
url = part.image_url.url if part.image_url else None
|
|
218
|
+
if isinstance(url, str) and url.startswith("data:"):
|
|
219
|
+
try:
|
|
220
|
+
header, b64data = url.split(",", 1)
|
|
221
|
+
mediatype = header.split(";")[0].split(":", 1)[1]
|
|
222
|
+
user_blocks.append(
|
|
223
|
+
{
|
|
224
|
+
"type": "image",
|
|
225
|
+
"source": {
|
|
226
|
+
"type": "base64",
|
|
227
|
+
"media_type": str(mediatype),
|
|
228
|
+
"data": str(b64data),
|
|
229
|
+
},
|
|
230
|
+
}
|
|
231
|
+
)
|
|
232
|
+
except Exception:
|
|
233
|
+
pass
|
|
234
|
+
if user_blocks:
|
|
235
|
+
# If we have images, always use list format
|
|
236
|
+
if text_accum:
|
|
237
|
+
user_blocks.insert(
|
|
238
|
+
0, {"type": "text", "text": " ".join(text_accum)}
|
|
239
|
+
)
|
|
240
|
+
out_messages.append({"role": "user", "content": user_blocks})
|
|
241
|
+
elif len(text_accum) > 1:
|
|
242
|
+
# Multiple text parts - use list format
|
|
243
|
+
text_blocks = [{"type": "text", "text": " ".join(text_accum)}]
|
|
244
|
+
out_messages.append({"role": "user", "content": text_blocks})
|
|
245
|
+
elif len(text_accum) == 1:
|
|
246
|
+
# Single text part - use string format
|
|
247
|
+
out_messages.append({"role": "user", "content": text_accum[0]})
|
|
248
|
+
else:
|
|
249
|
+
# No content - use empty string
|
|
250
|
+
out_messages.append({"role": "user", "content": ""})
|
|
251
|
+
else:
|
|
252
|
+
out_messages.append({"role": "user", "content": content})
|
|
253
|
+
|
|
254
|
+
# Sanitize tool_result blocks to ensure they have matching tool_use blocks
|
|
255
|
+
out_messages = _sanitize_tool_results(out_messages)
|
|
256
|
+
|
|
257
|
+
payload_data: dict[str, Any] = {
|
|
258
|
+
"model": model,
|
|
259
|
+
"messages": out_messages,
|
|
260
|
+
"max_tokens": max_tokens,
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
# Inject system guidance for response_format JSON modes
|
|
264
|
+
resp_fmt = request.response_format
|
|
265
|
+
if resp_fmt is not None:
|
|
266
|
+
inject: str | None = None
|
|
267
|
+
if resp_fmt.type == "json_object":
|
|
268
|
+
inject = (
|
|
269
|
+
"Respond ONLY with a valid JSON object. "
|
|
270
|
+
"Do not include any additional text, markdown, or explanation."
|
|
271
|
+
)
|
|
272
|
+
elif resp_fmt.type == "json_schema" and hasattr(resp_fmt, "json_schema"):
|
|
273
|
+
schema = resp_fmt.json_schema
|
|
274
|
+
try:
|
|
275
|
+
if schema is not None:
|
|
276
|
+
schema_str = json.dumps(
|
|
277
|
+
schema.model_dump()
|
|
278
|
+
if hasattr(schema, "model_dump")
|
|
279
|
+
else schema,
|
|
280
|
+
ensure_ascii=False,
|
|
281
|
+
separators=(",", ":"),
|
|
282
|
+
)
|
|
283
|
+
else:
|
|
284
|
+
schema_str = "{}"
|
|
285
|
+
except Exception:
|
|
286
|
+
schema_str = str(schema or {})
|
|
287
|
+
inject = (
|
|
288
|
+
"Respond ONLY with a JSON object that strictly conforms to this JSON Schema:\n"
|
|
289
|
+
f"{schema_str}"
|
|
290
|
+
)
|
|
291
|
+
if inject:
|
|
292
|
+
if system_value:
|
|
293
|
+
system_value = f"{system_value}\n\n{inject}"
|
|
294
|
+
else:
|
|
295
|
+
system_value = inject
|
|
296
|
+
|
|
297
|
+
if system_value is not None:
|
|
298
|
+
# Ensure system value is a string, not a complex object
|
|
299
|
+
if isinstance(system_value, str):
|
|
300
|
+
payload_data["system"] = system_value
|
|
301
|
+
else:
|
|
302
|
+
# If system_value is not a string, try to extract text content
|
|
303
|
+
try:
|
|
304
|
+
if isinstance(system_value, list):
|
|
305
|
+
# Handle list format: [{"type": "text", "text": "...", "cache_control": {...}}]
|
|
306
|
+
text_parts = []
|
|
307
|
+
for part in system_value:
|
|
308
|
+
if isinstance(part, dict) and part.get("type") == "text":
|
|
309
|
+
text_content = part.get("text")
|
|
310
|
+
if isinstance(text_content, str):
|
|
311
|
+
text_parts.append(text_content)
|
|
312
|
+
if text_parts:
|
|
313
|
+
payload_data["system"] = " ".join(text_parts)
|
|
314
|
+
elif (
|
|
315
|
+
isinstance(system_value, dict)
|
|
316
|
+
and system_value.get("type") == "text"
|
|
317
|
+
):
|
|
318
|
+
# Handle single dict format: {"type": "text", "text": "...", "cache_control": {...}}
|
|
319
|
+
text_content = system_value.get("text")
|
|
320
|
+
if isinstance(text_content, str):
|
|
321
|
+
payload_data["system"] = text_content
|
|
322
|
+
except Exception:
|
|
323
|
+
# Fallback: convert to string representation
|
|
324
|
+
payload_data["system"] = str(system_value)
|
|
325
|
+
if request.stream is not None:
|
|
326
|
+
payload_data["stream"] = request.stream
|
|
327
|
+
|
|
328
|
+
# Tools mapping (OpenAI function tools -> Anthropic tool definitions)
|
|
329
|
+
tools_in = request.tools or []
|
|
330
|
+
if tools_in:
|
|
331
|
+
anth_tools: list[dict[str, Any]] = []
|
|
332
|
+
for t in tools_in:
|
|
333
|
+
if t.type == "function" and t.function is not None:
|
|
334
|
+
fn = t.function
|
|
335
|
+
anth_tools.append(
|
|
336
|
+
{
|
|
337
|
+
"type": "custom",
|
|
338
|
+
"name": fn.name,
|
|
339
|
+
"description": fn.description,
|
|
340
|
+
"input_schema": fn.parameters.model_dump()
|
|
341
|
+
if hasattr(fn.parameters, "model_dump")
|
|
342
|
+
else (fn.parameters or {}),
|
|
343
|
+
}
|
|
344
|
+
)
|
|
345
|
+
if anth_tools:
|
|
346
|
+
payload_data["tools"] = anth_tools
|
|
347
|
+
|
|
348
|
+
# tool_choice mapping
|
|
349
|
+
tool_choice = request.tool_choice
|
|
350
|
+
parallel_tool_calls = request.parallel_tool_calls
|
|
351
|
+
disable_parallel = None
|
|
352
|
+
if isinstance(parallel_tool_calls, bool):
|
|
353
|
+
disable_parallel = not parallel_tool_calls
|
|
354
|
+
|
|
355
|
+
if tool_choice is not None:
|
|
356
|
+
anth_choice: dict[str, Any] | None = None
|
|
357
|
+
if isinstance(tool_choice, str):
|
|
358
|
+
if tool_choice == "none":
|
|
359
|
+
anth_choice = {"type": "none"}
|
|
360
|
+
elif tool_choice == "auto":
|
|
361
|
+
anth_choice = {"type": "auto"}
|
|
362
|
+
elif tool_choice == "required":
|
|
363
|
+
anth_choice = {"type": "any"}
|
|
364
|
+
elif isinstance(tool_choice, dict):
|
|
365
|
+
# Handle dict input like {"type": "function", "function": {"name": "search"}}
|
|
366
|
+
if tool_choice.get("type") == "function" and isinstance(
|
|
367
|
+
tool_choice.get("function"), dict
|
|
368
|
+
):
|
|
369
|
+
anth_choice = {
|
|
370
|
+
"type": "tool",
|
|
371
|
+
"name": tool_choice["function"].get("name"),
|
|
372
|
+
}
|
|
373
|
+
elif hasattr(tool_choice, "type") and hasattr(tool_choice, "function"):
|
|
374
|
+
# e.g., ChatCompletionNamedToolChoice pydantic model
|
|
375
|
+
if tool_choice.type == "function" and tool_choice.function is not None:
|
|
376
|
+
anth_choice = {
|
|
377
|
+
"type": "tool",
|
|
378
|
+
"name": tool_choice.function.name,
|
|
379
|
+
}
|
|
380
|
+
if anth_choice is not None:
|
|
381
|
+
if disable_parallel is not None and anth_choice["type"] in {
|
|
382
|
+
"auto",
|
|
383
|
+
"any",
|
|
384
|
+
"tool",
|
|
385
|
+
}:
|
|
386
|
+
anth_choice["disable_parallel_tool_use"] = disable_parallel
|
|
387
|
+
payload_data["tool_choice"] = anth_choice
|
|
388
|
+
|
|
389
|
+
# Thinking configuration
|
|
390
|
+
thinking_cfg = derive_thinking_config(model, request)
|
|
391
|
+
if thinking_cfg is not None:
|
|
392
|
+
payload_data["thinking"] = thinking_cfg
|
|
393
|
+
# Ensure token budget fits under max_tokens
|
|
394
|
+
budget = thinking_cfg.get("budget_tokens", 0)
|
|
395
|
+
if isinstance(budget, int) and max_tokens <= budget:
|
|
396
|
+
payload_data["max_tokens"] = budget + 64
|
|
397
|
+
# Temperature constraint when thinking enabled
|
|
398
|
+
payload_data["temperature"] = 1.0
|
|
399
|
+
|
|
400
|
+
# Validate against Anthropic model to ensure shape
|
|
401
|
+
return anthropic_models.CreateMessageRequest.model_validate(payload_data)
|
|
402
|
+
|
|
403
|
+
|
|
404
|
+
def convert__openai_responses_to_anthropic_message__request(
    request: openai_models.ResponseRequest,
) -> anthropic_models.CreateMessageRequest:
    """Convert an OpenAI Responses API request into an Anthropic Messages request.

    Handles:
    - ``input`` as a plain string (becomes a single user message) or as a list
      of message items (dicts or pydantic objects); ``system`` role items are
      collected into the Anthropic ``system`` field.
    - ``max_output_tokens`` (defaulting to ``DEFAULT_MAX_TOKENS``) and ``stream``.
    - Tool definitions (dict or pydantic form) mapped to Anthropic "custom" tools.
    - ``tool_choice`` mapping (``none``/``auto``/``required``/named function),
      including ``parallel_tool_calls`` -> ``disable_parallel_tool_use``.
    - ``text.format`` JSON modes, ``text.instructions``, and top-level
      ``instructions``, each appended to the ``system`` prompt.

    Args:
        request: The incoming OpenAI Responses API request model.

    Returns:
        A validated Anthropic ``CreateMessageRequest``.

    Raises:
        pydantic.ValidationError: If the assembled payload does not satisfy the
            Anthropic request schema.
    """
    model = request.model
    stream = bool(request.stream)
    max_out = request.max_output_tokens

    messages: list[dict[str, Any]] = []
    system_parts: list[str] = []
    input_val = request.input

    if isinstance(input_val, str):
        # Simple string input becomes a single user message.
        messages.append({"role": "user", "content": input_val})
    elif isinstance(input_val, list):
        for item in input_val:
            if isinstance(item, dict) and item.get("type") == "message":
                role = item.get("role", "user")
                content_list = item.get("content", [])
                text_parts: list[str] = []
                for part in content_list:
                    # Accept both the Responses "input_text" part type and
                    # plain "text" parts.
                    if isinstance(part, dict) and part.get("type") in {
                        "input_text",
                        "text",
                    }:
                        text = part.get("text")
                        if isinstance(text, str):
                            text_parts.append(text)
                content_text = " ".join(text_parts)
                if role == "system":
                    # System content is hoisted out of the message list.
                    system_parts.append(content_text)
                elif role in {"user", "assistant"}:
                    messages.append({"role": role, "content": content_text})
            elif hasattr(item, "type") and item.type == "message":
                # Pydantic-object variant of the same message handling.
                role = getattr(item, "role", "user")
                content_list = getattr(item, "content", []) or []
                text_parts_alt: list[str] = []
                for part in content_list:
                    if hasattr(part, "type") and part.type in {"input_text", "text"}:
                        text = getattr(part, "text", None)
                        if isinstance(text, str):
                            text_parts_alt.append(text)
                content_text = " ".join(text_parts_alt)
                if role == "system":
                    system_parts.append(content_text)
                elif role in {"user", "assistant"}:
                    messages.append({"role": role, "content": content_text})

    payload_data: dict[str, Any] = {"model": model, "messages": messages}
    if max_out is None:
        max_out = DEFAULT_MAX_TOKENS
    payload_data["max_tokens"] = int(max_out)
    if stream:
        payload_data["stream"] = True

    if system_parts:
        payload_data["system"] = "\n".join(system_parts)

    # Tools mapping (OpenAI function tools -> Anthropic tool definitions).
    tools_in = request.tools or []
    if tools_in:
        anth_tools: list[dict[str, Any]] = []
        for tool in tools_in:
            if isinstance(tool, dict):
                if tool.get("type") == "function":
                    fn = tool.get("function")
                    parameters = tool.get("parameters")
                    if isinstance(fn, dict):
                        # Nested {"function": {...}} form; fall back to the
                        # flat keys if the nested ones are absent.
                        name = fn.get("name") or tool.get("name")
                        description = fn.get("description") or tool.get("description")
                        schema = fn.get("parameters") or parameters or {}
                    else:
                        # Flat Responses-style tool: fields live on the tool.
                        name = tool.get("name")
                        description = tool.get("description")
                        schema = parameters or {}

                    anth_tools.append(
                        {
                            "type": "custom",
                            "name": name,
                            "description": description,
                            "input_schema": schema,
                        }
                    )
            elif (
                hasattr(tool, "type")
                and tool.type == "function"
                and hasattr(tool, "function")
                and tool.function is not None
            ):
                fn = tool.function
                # NOTE(review): name/description are read from the tool object
                # while the schema comes from fn.parameters — confirm that the
                # pydantic tool model exposes name/description at the top level.
                anth_tools.append(
                    {
                        "type": "custom",
                        "name": tool.name,
                        "description": tool.description,
                        "input_schema": fn.parameters.model_dump()
                        if hasattr(fn.parameters, "model_dump")
                        else (fn.parameters or {}),
                    }
                )
        if anth_tools:
            payload_data["tools"] = anth_tools

    # tool_choice mapping.
    tool_choice = request.tool_choice
    parallel_tool_calls = request.parallel_tool_calls
    disable_parallel = None
    if isinstance(parallel_tool_calls, bool):
        # OpenAI's parallel_tool_calls=True means parallel use is allowed,
        # which is Anthropic's disable_parallel_tool_use=False.
        disable_parallel = not parallel_tool_calls

    if tool_choice is not None:
        anth_choice: dict[str, Any] | None = None
        if isinstance(tool_choice, str):
            if tool_choice == "none":
                anth_choice = {"type": "none"}
            elif tool_choice == "auto":
                anth_choice = {"type": "auto"}
            elif tool_choice == "required":
                # OpenAI "required" corresponds to Anthropic "any".
                anth_choice = {"type": "any"}
        elif isinstance(tool_choice, dict):
            if tool_choice.get("type") == "function" and isinstance(
                tool_choice.get("function"), dict
            ):
                anth_choice = {
                    "type": "tool",
                    "name": tool_choice["function"].get("name"),
                }
        elif hasattr(tool_choice, "type") and hasattr(tool_choice, "function"):
            if tool_choice.type == "function" and tool_choice.function is not None:
                anth_choice = {"type": "tool", "name": tool_choice.function.name}
        if anth_choice is not None:
            # disable_parallel_tool_use is only valid on these choice types.
            if disable_parallel is not None and anth_choice["type"] in {
                "auto",
                "any",
                "tool",
            }:
                anth_choice["disable_parallel_tool_use"] = disable_parallel
            payload_data["tool_choice"] = anth_choice

    # Inject system guidance for text.format JSON modes.
    text_cfg = request.text
    inject: str | None = None
    if text_cfg is not None:
        fmt = None
        if isinstance(text_cfg, dict):
            fmt = text_cfg.get("format")
        elif hasattr(text_cfg, "format"):
            fmt = text_cfg.format
        if fmt is not None:
            if isinstance(fmt, dict):
                fmt_type = fmt.get("type")
                if fmt_type == "json_schema":
                    schema = fmt.get("json_schema") or fmt.get("schema") or {}
                    try:
                        inject_schema = json.dumps(schema, separators=(",", ":"))
                    except Exception:
                        # Best-effort: non-serializable schema falls back to repr.
                        inject_schema = str(schema)
                    inject = (
                        "Respond ONLY with JSON strictly conforming to this JSON Schema:\n"
                        f"{inject_schema}"
                    )
                elif fmt_type == "json_object":
                    inject = (
                        "Respond ONLY with a valid JSON object. "
                        "No prose. Do not wrap in markdown."
                    )
            elif hasattr(fmt, "type"):
                # Pydantic-object variant of the format config.
                if fmt.type == "json_object":
                    inject = (
                        "Respond ONLY with a valid JSON object. "
                        "No prose. Do not wrap in markdown."
                    )
                elif fmt.type == "json_schema" and (
                    hasattr(fmt, "json_schema") or hasattr(fmt, "schema")
                ):
                    schema_obj = getattr(fmt, "json_schema", None) or getattr(
                        fmt, "schema", None
                    )
                    try:
                        schema_data = (
                            schema_obj.model_dump()
                            if schema_obj and hasattr(schema_obj, "model_dump")
                            else schema_obj
                        )
                        inject_schema = json.dumps(schema_data, separators=(",", ":"))
                    except Exception:
                        inject_schema = str(schema_obj)
                    inject = (
                        "Respond ONLY with JSON strictly conforming to this JSON Schema:\n"
                        f"{inject_schema}"
                    )

    if inject:
        existing_system = payload_data.get("system")
        payload_data["system"] = (
            f"{existing_system}\n\n{inject}" if existing_system else inject
        )

    # text.instructions, if present, is appended to the system prompt.
    text_instructions: str | None = None
    if isinstance(text_cfg, dict):
        text_instructions = text_cfg.get("instructions")
    elif text_cfg and hasattr(text_cfg, "instructions"):
        text_instructions = text_cfg.instructions

    if isinstance(text_instructions, str) and text_instructions:
        existing_system = payload_data.get("system")
        payload_data["system"] = (
            f"{existing_system}\n\n{text_instructions}"
            if existing_system
            else text_instructions
        )

    # Top-level instructions are also appended to the system prompt.
    if isinstance(request.instructions, str) and request.instructions:
        existing_system = payload_data.get("system")
        payload_data["system"] = (
            f"{existing_system}\n\n{request.instructions}"
            if existing_system
            else request.instructions
        )

    # Thinking config is intentionally not derived here: ResponseRequest does
    # not carry the fields derive_thinking_config() reads. (The previous
    # implementation contained an unreachable `if thinking_cfg is not None`
    # branch guarded by `thinking_cfg = None`; it has been removed.)

    # Validate against the Anthropic model to ensure the final shape.
    return anthropic_models.CreateMessageRequest.model_validate(payload_data)
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
def derive_thinking_config(
    model: str, request: openai_models.ChatCompletionRequest
) -> dict[str, Any] | None:
    """Derive an Anthropic thinking config from OpenAI fields and the model name.

    Resolution order:
    1. An explicit ``reasoning_effort`` on the request wins:
       low=1000, medium=5000, high=10000 budget tokens.
    2. Otherwise the model family supplies a default budget:
       o3* -> 10000, o1-mini -> 3000, other o1* -> 5000.
    3. If neither applies, thinking stays disabled and ``None`` is returned.

    Args:
        model: The requested model name (matched case-insensitively).
        request: The OpenAI chat completion request; only its optional
            ``reasoning_effort`` attribute is consulted.

    Returns:
        ``{"type": "enabled", "budget_tokens": N}`` when thinking should be
        enabled, otherwise ``None``.
    """
    raw_effort = getattr(request, "reasoning_effort", None)
    normalized = raw_effort.strip().lower() if isinstance(raw_effort, str) else ""

    # Explicit effort takes precedence over model-family defaults.
    budget = {"low": 1000, "medium": 5000, "high": 10000}.get(normalized)

    if budget is None:
        lowered = model.lower()
        if lowered.startswith("o3"):
            budget = 10000
        elif lowered.startswith("o1-mini"):
            budget = 3000
        elif lowered.startswith("o1"):
            budget = 5000

    if budget is None:
        return None
    return {"type": "enabled", "budget_tokens": budget}
|
|
668
|
+
|
|
669
|
+
|
|
670
|
+
# Public API of this converter module; the leading-underscore helper is
# exported deliberately (see inline note).
__all__ = [
    "convert__openai_chat_to_anthropic_message__request",
    "convert__openai_responses_to_anthropic_message__request",
    "_sanitize_tool_results",  # Exposed for testing
]
|