ccproxy-api 0.1.6__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/api/__init__.py +1 -15
- ccproxy/api/app.py +439 -212
- ccproxy/api/bootstrap.py +30 -0
- ccproxy/api/decorators.py +85 -0
- ccproxy/api/dependencies.py +145 -176
- ccproxy/api/format_validation.py +54 -0
- ccproxy/api/middleware/cors.py +6 -3
- ccproxy/api/middleware/errors.py +402 -530
- ccproxy/api/middleware/hooks.py +563 -0
- ccproxy/api/middleware/normalize_headers.py +59 -0
- ccproxy/api/middleware/request_id.py +35 -16
- ccproxy/api/middleware/streaming_hooks.py +292 -0
- ccproxy/api/routes/__init__.py +5 -14
- ccproxy/api/routes/health.py +39 -672
- ccproxy/api/routes/plugins.py +277 -0
- ccproxy/auth/__init__.py +2 -19
- ccproxy/auth/bearer.py +25 -15
- ccproxy/auth/dependencies.py +123 -157
- ccproxy/auth/exceptions.py +0 -12
- ccproxy/auth/manager.py +35 -49
- ccproxy/auth/managers/__init__.py +10 -0
- ccproxy/auth/managers/base.py +523 -0
- ccproxy/auth/managers/base_enhanced.py +63 -0
- ccproxy/auth/managers/token_snapshot.py +77 -0
- ccproxy/auth/models/base.py +65 -0
- ccproxy/auth/models/credentials.py +40 -0
- ccproxy/auth/oauth/__init__.py +4 -18
- ccproxy/auth/oauth/base.py +533 -0
- ccproxy/auth/oauth/cli_errors.py +37 -0
- ccproxy/auth/oauth/flows.py +430 -0
- ccproxy/auth/oauth/protocol.py +366 -0
- ccproxy/auth/oauth/registry.py +408 -0
- ccproxy/auth/oauth/router.py +396 -0
- ccproxy/auth/oauth/routes.py +186 -113
- ccproxy/auth/oauth/session.py +151 -0
- ccproxy/auth/oauth/templates.py +342 -0
- ccproxy/auth/storage/__init__.py +2 -5
- ccproxy/auth/storage/base.py +279 -5
- ccproxy/auth/storage/generic.py +134 -0
- ccproxy/cli/__init__.py +1 -2
- ccproxy/cli/_settings_help.py +351 -0
- ccproxy/cli/commands/auth.py +1519 -793
- ccproxy/cli/commands/config/commands.py +209 -276
- ccproxy/cli/commands/plugins.py +669 -0
- ccproxy/cli/commands/serve.py +75 -810
- ccproxy/cli/commands/status.py +254 -0
- ccproxy/cli/decorators.py +83 -0
- ccproxy/cli/helpers.py +22 -60
- ccproxy/cli/main.py +359 -10
- ccproxy/cli/options/claude_options.py +0 -25
- ccproxy/config/__init__.py +7 -11
- ccproxy/config/core.py +227 -0
- ccproxy/config/env_generator.py +232 -0
- ccproxy/config/runtime.py +67 -0
- ccproxy/config/security.py +36 -3
- ccproxy/config/settings.py +382 -441
- ccproxy/config/toml_generator.py +299 -0
- ccproxy/config/utils.py +452 -0
- ccproxy/core/__init__.py +7 -271
- ccproxy/{_version.py → core/_version.py} +16 -3
- ccproxy/core/async_task_manager.py +516 -0
- ccproxy/core/async_utils.py +47 -14
- ccproxy/core/auth/__init__.py +6 -0
- ccproxy/core/constants.py +16 -50
- ccproxy/core/errors.py +53 -0
- ccproxy/core/id_utils.py +20 -0
- ccproxy/core/interfaces.py +16 -123
- ccproxy/core/logging.py +473 -18
- ccproxy/core/plugins/__init__.py +77 -0
- ccproxy/core/plugins/cli_discovery.py +211 -0
- ccproxy/core/plugins/declaration.py +455 -0
- ccproxy/core/plugins/discovery.py +604 -0
- ccproxy/core/plugins/factories.py +967 -0
- ccproxy/core/plugins/hooks/__init__.py +30 -0
- ccproxy/core/plugins/hooks/base.py +58 -0
- ccproxy/core/plugins/hooks/events.py +46 -0
- ccproxy/core/plugins/hooks/implementations/__init__.py +16 -0
- ccproxy/core/plugins/hooks/implementations/formatters/__init__.py +11 -0
- ccproxy/core/plugins/hooks/implementations/formatters/json.py +552 -0
- ccproxy/core/plugins/hooks/implementations/formatters/raw.py +370 -0
- ccproxy/core/plugins/hooks/implementations/http_tracer.py +431 -0
- ccproxy/core/plugins/hooks/layers.py +44 -0
- ccproxy/core/plugins/hooks/manager.py +186 -0
- ccproxy/core/plugins/hooks/registry.py +139 -0
- ccproxy/core/plugins/hooks/thread_manager.py +203 -0
- ccproxy/core/plugins/hooks/types.py +22 -0
- ccproxy/core/plugins/interfaces.py +416 -0
- ccproxy/core/plugins/loader.py +166 -0
- ccproxy/core/plugins/middleware.py +233 -0
- ccproxy/core/plugins/models.py +59 -0
- ccproxy/core/plugins/protocol.py +180 -0
- ccproxy/core/plugins/runtime.py +519 -0
- ccproxy/{observability/context.py → core/request_context.py} +137 -94
- ccproxy/core/status_report.py +211 -0
- ccproxy/core/transformers.py +13 -8
- ccproxy/data/claude_headers_fallback.json +558 -0
- ccproxy/data/codex_headers_fallback.json +121 -0
- ccproxy/http/__init__.py +30 -0
- ccproxy/http/base.py +95 -0
- ccproxy/http/client.py +323 -0
- ccproxy/http/hooks.py +642 -0
- ccproxy/http/pool.py +279 -0
- ccproxy/llms/formatters/__init__.py +7 -0
- ccproxy/llms/formatters/anthropic_to_openai/__init__.py +55 -0
- ccproxy/llms/formatters/anthropic_to_openai/errors.py +65 -0
- ccproxy/llms/formatters/anthropic_to_openai/requests.py +356 -0
- ccproxy/llms/formatters/anthropic_to_openai/responses.py +153 -0
- ccproxy/llms/formatters/anthropic_to_openai/streams.py +1546 -0
- ccproxy/llms/formatters/base.py +140 -0
- ccproxy/llms/formatters/base_model.py +33 -0
- ccproxy/llms/formatters/common/__init__.py +51 -0
- ccproxy/llms/formatters/common/identifiers.py +48 -0
- ccproxy/llms/formatters/common/streams.py +254 -0
- ccproxy/llms/formatters/common/thinking.py +74 -0
- ccproxy/llms/formatters/common/usage.py +135 -0
- ccproxy/llms/formatters/constants.py +55 -0
- ccproxy/llms/formatters/context.py +116 -0
- ccproxy/llms/formatters/mapping.py +33 -0
- ccproxy/llms/formatters/openai_to_anthropic/__init__.py +55 -0
- ccproxy/llms/formatters/openai_to_anthropic/_helpers.py +141 -0
- ccproxy/llms/formatters/openai_to_anthropic/errors.py +53 -0
- ccproxy/llms/formatters/openai_to_anthropic/requests.py +674 -0
- ccproxy/llms/formatters/openai_to_anthropic/responses.py +285 -0
- ccproxy/llms/formatters/openai_to_anthropic/streams.py +530 -0
- ccproxy/llms/formatters/openai_to_openai/__init__.py +53 -0
- ccproxy/llms/formatters/openai_to_openai/_helpers.py +325 -0
- ccproxy/llms/formatters/openai_to_openai/errors.py +6 -0
- ccproxy/llms/formatters/openai_to_openai/requests.py +388 -0
- ccproxy/llms/formatters/openai_to_openai/responses.py +594 -0
- ccproxy/llms/formatters/openai_to_openai/streams.py +1832 -0
- ccproxy/llms/formatters/utils.py +306 -0
- ccproxy/llms/models/__init__.py +9 -0
- ccproxy/llms/models/anthropic.py +619 -0
- ccproxy/llms/models/openai.py +844 -0
- ccproxy/llms/streaming/__init__.py +26 -0
- ccproxy/llms/streaming/accumulators.py +1074 -0
- ccproxy/llms/streaming/formatters.py +251 -0
- ccproxy/{adapters/openai/streaming.py → llms/streaming/processors.py} +193 -240
- ccproxy/models/__init__.py +8 -159
- ccproxy/models/detection.py +92 -193
- ccproxy/models/provider.py +75 -0
- ccproxy/plugins/access_log/README.md +32 -0
- ccproxy/plugins/access_log/__init__.py +20 -0
- ccproxy/plugins/access_log/config.py +33 -0
- ccproxy/plugins/access_log/formatter.py +126 -0
- ccproxy/plugins/access_log/hook.py +763 -0
- ccproxy/plugins/access_log/logger.py +254 -0
- ccproxy/plugins/access_log/plugin.py +137 -0
- ccproxy/plugins/access_log/writer.py +109 -0
- ccproxy/plugins/analytics/README.md +24 -0
- ccproxy/plugins/analytics/__init__.py +1 -0
- ccproxy/plugins/analytics/config.py +5 -0
- ccproxy/plugins/analytics/ingest.py +85 -0
- ccproxy/plugins/analytics/models.py +97 -0
- ccproxy/plugins/analytics/plugin.py +121 -0
- ccproxy/plugins/analytics/routes.py +163 -0
- ccproxy/plugins/analytics/service.py +284 -0
- ccproxy/plugins/claude_api/README.md +29 -0
- ccproxy/plugins/claude_api/__init__.py +10 -0
- ccproxy/plugins/claude_api/adapter.py +829 -0
- ccproxy/plugins/claude_api/config.py +52 -0
- ccproxy/plugins/claude_api/detection_service.py +461 -0
- ccproxy/plugins/claude_api/health.py +175 -0
- ccproxy/plugins/claude_api/hooks.py +284 -0
- ccproxy/plugins/claude_api/models.py +256 -0
- ccproxy/plugins/claude_api/plugin.py +298 -0
- ccproxy/plugins/claude_api/routes.py +118 -0
- ccproxy/plugins/claude_api/streaming_metrics.py +68 -0
- ccproxy/plugins/claude_api/tasks.py +84 -0
- ccproxy/plugins/claude_sdk/README.md +35 -0
- ccproxy/plugins/claude_sdk/__init__.py +80 -0
- ccproxy/plugins/claude_sdk/adapter.py +749 -0
- ccproxy/plugins/claude_sdk/auth.py +57 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/client.py +63 -39
- ccproxy/plugins/claude_sdk/config.py +210 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/converter.py +6 -6
- ccproxy/plugins/claude_sdk/detection_service.py +163 -0
- ccproxy/{services/claude_sdk_service.py → plugins/claude_sdk/handler.py} +123 -304
- ccproxy/plugins/claude_sdk/health.py +113 -0
- ccproxy/plugins/claude_sdk/hooks.py +115 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/manager.py +42 -32
- ccproxy/{claude_sdk → plugins/claude_sdk}/message_queue.py +8 -8
- ccproxy/{models/claude_sdk.py → plugins/claude_sdk/models.py} +64 -16
- ccproxy/plugins/claude_sdk/options.py +154 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/parser.py +23 -5
- ccproxy/plugins/claude_sdk/plugin.py +269 -0
- ccproxy/plugins/claude_sdk/routes.py +104 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/session_client.py +124 -12
- ccproxy/plugins/claude_sdk/session_pool.py +700 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_handle.py +48 -43
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_worker.py +22 -18
- ccproxy/{claude_sdk → plugins/claude_sdk}/streaming.py +50 -16
- ccproxy/plugins/claude_sdk/tasks.py +97 -0
- ccproxy/plugins/claude_shared/README.md +18 -0
- ccproxy/plugins/claude_shared/__init__.py +12 -0
- ccproxy/plugins/claude_shared/model_defaults.py +171 -0
- ccproxy/plugins/codex/README.md +35 -0
- ccproxy/plugins/codex/__init__.py +6 -0
- ccproxy/plugins/codex/adapter.py +635 -0
- ccproxy/{config/codex.py → plugins/codex/config.py} +78 -12
- ccproxy/plugins/codex/detection_service.py +544 -0
- ccproxy/plugins/codex/health.py +162 -0
- ccproxy/plugins/codex/hooks.py +263 -0
- ccproxy/plugins/codex/model_defaults.py +39 -0
- ccproxy/plugins/codex/models.py +263 -0
- ccproxy/plugins/codex/plugin.py +275 -0
- ccproxy/plugins/codex/routes.py +129 -0
- ccproxy/plugins/codex/streaming_metrics.py +324 -0
- ccproxy/plugins/codex/tasks.py +106 -0
- ccproxy/plugins/codex/utils/__init__.py +1 -0
- ccproxy/plugins/codex/utils/sse_parser.py +106 -0
- ccproxy/plugins/command_replay/README.md +34 -0
- ccproxy/plugins/command_replay/__init__.py +17 -0
- ccproxy/plugins/command_replay/config.py +133 -0
- ccproxy/plugins/command_replay/formatter.py +432 -0
- ccproxy/plugins/command_replay/hook.py +294 -0
- ccproxy/plugins/command_replay/plugin.py +161 -0
- ccproxy/plugins/copilot/README.md +39 -0
- ccproxy/plugins/copilot/__init__.py +11 -0
- ccproxy/plugins/copilot/adapter.py +465 -0
- ccproxy/plugins/copilot/config.py +155 -0
- ccproxy/plugins/copilot/data/copilot_fallback.json +41 -0
- ccproxy/plugins/copilot/detection_service.py +255 -0
- ccproxy/plugins/copilot/manager.py +275 -0
- ccproxy/plugins/copilot/model_defaults.py +284 -0
- ccproxy/plugins/copilot/models.py +148 -0
- ccproxy/plugins/copilot/oauth/__init__.py +16 -0
- ccproxy/plugins/copilot/oauth/client.py +494 -0
- ccproxy/plugins/copilot/oauth/models.py +385 -0
- ccproxy/plugins/copilot/oauth/provider.py +602 -0
- ccproxy/plugins/copilot/oauth/storage.py +170 -0
- ccproxy/plugins/copilot/plugin.py +360 -0
- ccproxy/plugins/copilot/routes.py +294 -0
- ccproxy/plugins/credential_balancer/README.md +124 -0
- ccproxy/plugins/credential_balancer/__init__.py +6 -0
- ccproxy/plugins/credential_balancer/config.py +270 -0
- ccproxy/plugins/credential_balancer/factory.py +415 -0
- ccproxy/plugins/credential_balancer/hook.py +51 -0
- ccproxy/plugins/credential_balancer/manager.py +587 -0
- ccproxy/plugins/credential_balancer/plugin.py +146 -0
- ccproxy/plugins/dashboard/README.md +25 -0
- ccproxy/plugins/dashboard/__init__.py +1 -0
- ccproxy/plugins/dashboard/config.py +8 -0
- ccproxy/plugins/dashboard/plugin.py +71 -0
- ccproxy/plugins/dashboard/routes.py +67 -0
- ccproxy/plugins/docker/README.md +32 -0
- ccproxy/{docker → plugins/docker}/__init__.py +3 -0
- ccproxy/{docker → plugins/docker}/adapter.py +108 -10
- ccproxy/plugins/docker/config.py +82 -0
- ccproxy/{docker → plugins/docker}/docker_path.py +4 -3
- ccproxy/{docker → plugins/docker}/middleware.py +2 -2
- ccproxy/plugins/docker/plugin.py +198 -0
- ccproxy/{docker → plugins/docker}/stream_process.py +3 -3
- ccproxy/plugins/duckdb_storage/README.md +26 -0
- ccproxy/plugins/duckdb_storage/__init__.py +1 -0
- ccproxy/plugins/duckdb_storage/config.py +22 -0
- ccproxy/plugins/duckdb_storage/plugin.py +128 -0
- ccproxy/plugins/duckdb_storage/routes.py +51 -0
- ccproxy/plugins/duckdb_storage/storage.py +633 -0
- ccproxy/plugins/max_tokens/README.md +38 -0
- ccproxy/plugins/max_tokens/__init__.py +12 -0
- ccproxy/plugins/max_tokens/adapter.py +235 -0
- ccproxy/plugins/max_tokens/config.py +86 -0
- ccproxy/plugins/max_tokens/models.py +53 -0
- ccproxy/plugins/max_tokens/plugin.py +200 -0
- ccproxy/plugins/max_tokens/service.py +271 -0
- ccproxy/plugins/max_tokens/token_limits.json +54 -0
- ccproxy/plugins/metrics/README.md +35 -0
- ccproxy/plugins/metrics/__init__.py +10 -0
- ccproxy/{observability/metrics.py → plugins/metrics/collector.py} +20 -153
- ccproxy/plugins/metrics/config.py +85 -0
- ccproxy/plugins/metrics/grafana/dashboards/ccproxy-dashboard.json +1720 -0
- ccproxy/plugins/metrics/hook.py +403 -0
- ccproxy/plugins/metrics/plugin.py +268 -0
- ccproxy/{observability → plugins/metrics}/pushgateway.py +57 -59
- ccproxy/plugins/metrics/routes.py +107 -0
- ccproxy/plugins/metrics/tasks.py +117 -0
- ccproxy/plugins/oauth_claude/README.md +35 -0
- ccproxy/plugins/oauth_claude/__init__.py +14 -0
- ccproxy/plugins/oauth_claude/client.py +270 -0
- ccproxy/plugins/oauth_claude/config.py +84 -0
- ccproxy/plugins/oauth_claude/manager.py +482 -0
- ccproxy/plugins/oauth_claude/models.py +266 -0
- ccproxy/plugins/oauth_claude/plugin.py +149 -0
- ccproxy/plugins/oauth_claude/provider.py +571 -0
- ccproxy/plugins/oauth_claude/storage.py +212 -0
- ccproxy/plugins/oauth_codex/README.md +38 -0
- ccproxy/plugins/oauth_codex/__init__.py +14 -0
- ccproxy/plugins/oauth_codex/client.py +224 -0
- ccproxy/plugins/oauth_codex/config.py +95 -0
- ccproxy/plugins/oauth_codex/manager.py +256 -0
- ccproxy/plugins/oauth_codex/models.py +239 -0
- ccproxy/plugins/oauth_codex/plugin.py +146 -0
- ccproxy/plugins/oauth_codex/provider.py +574 -0
- ccproxy/plugins/oauth_codex/storage.py +92 -0
- ccproxy/plugins/permissions/README.md +28 -0
- ccproxy/plugins/permissions/__init__.py +22 -0
- ccproxy/plugins/permissions/config.py +28 -0
- ccproxy/{cli/commands/permission_handler.py → plugins/permissions/handlers/cli.py} +49 -25
- ccproxy/plugins/permissions/handlers/protocol.py +33 -0
- ccproxy/plugins/permissions/handlers/terminal.py +675 -0
- ccproxy/{api/routes → plugins/permissions}/mcp.py +34 -7
- ccproxy/{models/permissions.py → plugins/permissions/models.py} +65 -1
- ccproxy/plugins/permissions/plugin.py +153 -0
- ccproxy/{api/routes/permissions.py → plugins/permissions/routes.py} +20 -16
- ccproxy/{api/services/permission_service.py → plugins/permissions/service.py} +65 -11
- ccproxy/{api → plugins/permissions}/ui/permission_handler_protocol.py +1 -1
- ccproxy/{api → plugins/permissions}/ui/terminal_permission_handler.py +66 -10
- ccproxy/plugins/pricing/README.md +34 -0
- ccproxy/plugins/pricing/__init__.py +6 -0
- ccproxy/{pricing → plugins/pricing}/cache.py +7 -6
- ccproxy/{config/pricing.py → plugins/pricing/config.py} +32 -6
- ccproxy/plugins/pricing/exceptions.py +35 -0
- ccproxy/plugins/pricing/loader.py +440 -0
- ccproxy/{pricing → plugins/pricing}/models.py +13 -23
- ccproxy/plugins/pricing/plugin.py +169 -0
- ccproxy/plugins/pricing/service.py +191 -0
- ccproxy/plugins/pricing/tasks.py +300 -0
- ccproxy/{pricing → plugins/pricing}/updater.py +86 -72
- ccproxy/plugins/pricing/utils.py +99 -0
- ccproxy/plugins/request_tracer/README.md +40 -0
- ccproxy/plugins/request_tracer/__init__.py +7 -0
- ccproxy/plugins/request_tracer/config.py +120 -0
- ccproxy/plugins/request_tracer/hook.py +415 -0
- ccproxy/plugins/request_tracer/plugin.py +255 -0
- ccproxy/scheduler/__init__.py +2 -14
- ccproxy/scheduler/core.py +26 -41
- ccproxy/scheduler/manager.py +63 -107
- ccproxy/scheduler/registry.py +6 -32
- ccproxy/scheduler/tasks.py +346 -314
- ccproxy/services/__init__.py +0 -1
- ccproxy/services/adapters/__init__.py +11 -0
- ccproxy/services/adapters/base.py +123 -0
- ccproxy/services/adapters/chain_composer.py +88 -0
- ccproxy/services/adapters/chain_validation.py +44 -0
- ccproxy/services/adapters/chat_accumulator.py +200 -0
- ccproxy/services/adapters/delta_utils.py +142 -0
- ccproxy/services/adapters/format_adapter.py +136 -0
- ccproxy/services/adapters/format_context.py +11 -0
- ccproxy/services/adapters/format_registry.py +158 -0
- ccproxy/services/adapters/http_adapter.py +1045 -0
- ccproxy/services/adapters/mock_adapter.py +118 -0
- ccproxy/services/adapters/protocols.py +35 -0
- ccproxy/services/adapters/simple_converters.py +571 -0
- ccproxy/services/auth_registry.py +180 -0
- ccproxy/services/cache/__init__.py +6 -0
- ccproxy/services/cache/response_cache.py +261 -0
- ccproxy/services/cli_detection.py +437 -0
- ccproxy/services/config/__init__.py +6 -0
- ccproxy/services/config/proxy_configuration.py +111 -0
- ccproxy/services/container.py +256 -0
- ccproxy/services/factories.py +380 -0
- ccproxy/services/handler_config.py +76 -0
- ccproxy/services/interfaces.py +298 -0
- ccproxy/services/mocking/__init__.py +6 -0
- ccproxy/services/mocking/mock_handler.py +291 -0
- ccproxy/services/tracing/__init__.py +7 -0
- ccproxy/services/tracing/interfaces.py +61 -0
- ccproxy/services/tracing/null_tracer.py +57 -0
- ccproxy/streaming/__init__.py +23 -0
- ccproxy/streaming/buffer.py +1056 -0
- ccproxy/streaming/deferred.py +897 -0
- ccproxy/streaming/handler.py +117 -0
- ccproxy/streaming/interfaces.py +77 -0
- ccproxy/streaming/simple_adapter.py +39 -0
- ccproxy/streaming/sse.py +109 -0
- ccproxy/streaming/sse_parser.py +127 -0
- ccproxy/templates/__init__.py +6 -0
- ccproxy/templates/plugin_scaffold.py +695 -0
- ccproxy/testing/endpoints/__init__.py +33 -0
- ccproxy/testing/endpoints/cli.py +215 -0
- ccproxy/testing/endpoints/config.py +874 -0
- ccproxy/testing/endpoints/console.py +57 -0
- ccproxy/testing/endpoints/models.py +100 -0
- ccproxy/testing/endpoints/runner.py +1903 -0
- ccproxy/testing/endpoints/tools.py +308 -0
- ccproxy/testing/mock_responses.py +70 -1
- ccproxy/testing/response_handlers.py +20 -0
- ccproxy/utils/__init__.py +0 -6
- ccproxy/utils/binary_resolver.py +476 -0
- ccproxy/utils/caching.py +327 -0
- ccproxy/utils/cli_logging.py +101 -0
- ccproxy/utils/command_line.py +251 -0
- ccproxy/utils/headers.py +228 -0
- ccproxy/utils/model_mapper.py +120 -0
- ccproxy/utils/startup_helpers.py +95 -342
- ccproxy/utils/version_checker.py +279 -6
- ccproxy_api-0.2.0.dist-info/METADATA +212 -0
- ccproxy_api-0.2.0.dist-info/RECORD +417 -0
- {ccproxy_api-0.1.6.dist-info → ccproxy_api-0.2.0.dist-info}/WHEEL +1 -1
- ccproxy_api-0.2.0.dist-info/entry_points.txt +24 -0
- ccproxy/__init__.py +0 -4
- ccproxy/adapters/__init__.py +0 -11
- ccproxy/adapters/base.py +0 -80
- ccproxy/adapters/codex/__init__.py +0 -11
- ccproxy/adapters/openai/__init__.py +0 -42
- ccproxy/adapters/openai/adapter.py +0 -953
- ccproxy/adapters/openai/models.py +0 -412
- ccproxy/adapters/openai/response_adapter.py +0 -355
- ccproxy/adapters/openai/response_models.py +0 -178
- ccproxy/api/middleware/headers.py +0 -49
- ccproxy/api/middleware/logging.py +0 -180
- ccproxy/api/middleware/request_content_logging.py +0 -297
- ccproxy/api/middleware/server_header.py +0 -58
- ccproxy/api/responses.py +0 -89
- ccproxy/api/routes/claude.py +0 -371
- ccproxy/api/routes/codex.py +0 -1231
- ccproxy/api/routes/metrics.py +0 -1029
- ccproxy/api/routes/proxy.py +0 -211
- ccproxy/api/services/__init__.py +0 -6
- ccproxy/auth/conditional.py +0 -84
- ccproxy/auth/credentials_adapter.py +0 -93
- ccproxy/auth/models.py +0 -118
- ccproxy/auth/oauth/models.py +0 -48
- ccproxy/auth/openai/__init__.py +0 -13
- ccproxy/auth/openai/credentials.py +0 -166
- ccproxy/auth/openai/oauth_client.py +0 -334
- ccproxy/auth/openai/storage.py +0 -184
- ccproxy/auth/storage/json_file.py +0 -158
- ccproxy/auth/storage/keyring.py +0 -189
- ccproxy/claude_sdk/__init__.py +0 -18
- ccproxy/claude_sdk/options.py +0 -194
- ccproxy/claude_sdk/session_pool.py +0 -550
- ccproxy/cli/docker/__init__.py +0 -34
- ccproxy/cli/docker/adapter_factory.py +0 -157
- ccproxy/cli/docker/params.py +0 -274
- ccproxy/config/auth.py +0 -153
- ccproxy/config/claude.py +0 -348
- ccproxy/config/cors.py +0 -79
- ccproxy/config/discovery.py +0 -95
- ccproxy/config/docker_settings.py +0 -264
- ccproxy/config/observability.py +0 -158
- ccproxy/config/reverse_proxy.py +0 -31
- ccproxy/config/scheduler.py +0 -108
- ccproxy/config/server.py +0 -86
- ccproxy/config/validators.py +0 -231
- ccproxy/core/codex_transformers.py +0 -389
- ccproxy/core/http.py +0 -328
- ccproxy/core/http_transformers.py +0 -812
- ccproxy/core/proxy.py +0 -143
- ccproxy/core/validators.py +0 -288
- ccproxy/models/errors.py +0 -42
- ccproxy/models/messages.py +0 -269
- ccproxy/models/requests.py +0 -107
- ccproxy/models/responses.py +0 -270
- ccproxy/models/types.py +0 -102
- ccproxy/observability/__init__.py +0 -51
- ccproxy/observability/access_logger.py +0 -457
- ccproxy/observability/sse_events.py +0 -303
- ccproxy/observability/stats_printer.py +0 -753
- ccproxy/observability/storage/__init__.py +0 -1
- ccproxy/observability/storage/duckdb_simple.py +0 -677
- ccproxy/observability/storage/models.py +0 -70
- ccproxy/observability/streaming_response.py +0 -107
- ccproxy/pricing/__init__.py +0 -19
- ccproxy/pricing/loader.py +0 -251
- ccproxy/services/claude_detection_service.py +0 -269
- ccproxy/services/codex_detection_service.py +0 -263
- ccproxy/services/credentials/__init__.py +0 -55
- ccproxy/services/credentials/config.py +0 -105
- ccproxy/services/credentials/manager.py +0 -561
- ccproxy/services/credentials/oauth_client.py +0 -481
- ccproxy/services/proxy_service.py +0 -1827
- ccproxy/static/.keep +0 -0
- ccproxy/utils/cost_calculator.py +0 -210
- ccproxy/utils/disconnection_monitor.py +0 -83
- ccproxy/utils/model_mapping.py +0 -199
- ccproxy/utils/models_provider.py +0 -150
- ccproxy/utils/simple_request_logger.py +0 -284
- ccproxy/utils/streaming_metrics.py +0 -199
- ccproxy_api-0.1.6.dist-info/METADATA +0 -615
- ccproxy_api-0.1.6.dist-info/RECORD +0 -189
- ccproxy_api-0.1.6.dist-info/entry_points.txt +0 -4
- /ccproxy/{api/middleware/auth.py → auth/models/__init__.py} +0 -0
- /ccproxy/{claude_sdk → plugins/claude_sdk}/exceptions.py +0 -0
- /ccproxy/{docker → plugins/docker}/models.py +0 -0
- /ccproxy/{docker → plugins/docker}/protocol.py +0 -0
- /ccproxy/{docker → plugins/docker}/validators.py +0 -0
- /ccproxy/{auth/oauth/storage.py → plugins/permissions/handlers/__init__.py} +0 -0
- /ccproxy/{api → plugins/permissions}/ui/__init__.py +0 -0
- {ccproxy_api-0.1.6.dist-info → ccproxy_api-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,874 @@
|
|
|
1
|
+
"""Configuration and static data for endpoint test execution."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Any, Protocol
|
|
8
|
+
|
|
9
|
+
from ccproxy.llms.models.anthropic import MessageResponse, MessageStartEvent
|
|
10
|
+
from ccproxy.llms.models.openai import (
|
|
11
|
+
BaseStreamEvent,
|
|
12
|
+
ChatCompletionChunk,
|
|
13
|
+
ChatCompletionResponse,
|
|
14
|
+
ResponseObject,
|
|
15
|
+
)
|
|
16
|
+
from ccproxy.llms.streaming.accumulators import (
|
|
17
|
+
ClaudeAccumulator,
|
|
18
|
+
OpenAIAccumulator,
|
|
19
|
+
ResponsesAccumulator,
|
|
20
|
+
StreamAccumulator,
|
|
21
|
+
)
|
|
22
|
+
from ccproxy.plugins.claude_api import factory as claude_api_factory
|
|
23
|
+
from ccproxy.plugins.claude_sdk.plugin import factory as claude_sdk_factory
|
|
24
|
+
from ccproxy.plugins.codex import factory as codex_factory
|
|
25
|
+
from ccproxy.plugins.copilot import factory as copilot_factory
|
|
26
|
+
|
|
27
|
+
from .models import EndpointTest
|
|
28
|
+
from .tools import ANTHROPIC_TOOLS, CODEX_TOOLS, OPENAI_TOOLS
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# Centralized message payloads per provider.
#
# The original table repeated the same prompt strings verbatim across
# provider keys (e.g. the tools prompt appeared three times); each prompt is
# now defined once and expanded through small factories so wording cannot
# drift between entries.  The factories return *fresh* lists/dicts per call,
# preserving the original property that every key holds its own objects.

_HELLO_TEXT = "Hello"
_TOOLS_TEXT = "What's the weather like in New York, and how far is it from Los Angeles?"
_THINKING_TEXT = "I need to calculate the factorial of 5. Can you help me think through this step by step?"
_STRUCTURED_TEXT = "What is 2+2? Answer in one word."


def _chat_user(text: str) -> list[dict[str, Any]]:
    """Return a fresh one-turn Chat-Completions-style payload (single user message)."""
    return [{"role": "user", "content": text}]


def _responses_user(text: str) -> list[dict[str, Any]]:
    """Return a fresh one-turn Responses-API-style payload (typed message with input_text part)."""
    return [
        {
            "type": "message",
            "role": "user",
            "content": [{"type": "input_text", "text": text}],
        }
    ]


MESSAGE_PAYLOADS: dict[str, Any] = {
    "openai": _chat_user(_HELLO_TEXT),
    "anthropic": _chat_user(_HELLO_TEXT),
    "response_api": _responses_user(_HELLO_TEXT),
    "response_api_structured": _responses_user(_STRUCTURED_TEXT),
    # Tool testing payloads
    "openai_tools": _chat_user(_TOOLS_TEXT),
    "anthropic_tools": _chat_user(_TOOLS_TEXT),
    "responses_tools": _chat_user(_TOOLS_TEXT),
    # Thinking mode payloads
    "openai_thinking": _chat_user(_THINKING_TEXT),
    "responses_thinking": _chat_user(_THINKING_TEXT),
    # Structured output payloads
    "openai_structured": _chat_user(_STRUCTURED_TEXT),
}
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
# JSON Schema for the structured-output math answer: exactly two required
# string fields, no extras allowed.  Named separately so the schema reads on
# its own rather than buried two levels deep in the response_format wrapper.
_STRUCTURED_MATH_SCHEMA: dict[str, Any] = {
    "type": "object",
    "properties": {
        "answer": {
            "type": "string",
            "description": "Single-word response to the math question.",
        },
        "answer_type": {
            "type": "string",
            "description": "Classification of the answer, e.g. number or word.",
        },
    },
    "required": ["answer", "answer_type"],
    "additionalProperties": False,
}

# Chat Completions `response_format` payload wrapping the schema above.
OPENAI_STRUCTURED_RESPONSE_FORMAT: dict[str, Any] = {
    "type": "json_schema",
    "json_schema": {
        "name": "structured_math_response",
        "schema": _STRUCTURED_MATH_SCHEMA,
    },
}
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
# Responses-API `text` parameter mirroring the Chat Completions structured
# format, so both endpoint families validate against one JSON schema.
# Alias the sibling constant's inner dict once instead of indexing it twice.
_CHAT_JSON_SCHEMA = OPENAI_STRUCTURED_RESPONSE_FORMAT["json_schema"]

RESPONSES_STRUCTURED_TEXT_FORMAT: dict[str, Any] = {
    "format": {
        "type": "json_schema",
        # .get() keeps the same defensive fallbacks the table always had,
        # even though both keys are present today.
        "name": _CHAT_JSON_SCHEMA.get("name", "structured_response"),
        "schema": _CHAT_JSON_SCHEMA.get("schema", {}),
    }
}
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
# Request payload templates with model_class for validation
|
|
126
|
+
REQUEST_DATA: dict[str, dict[str, Any]] = {
|
|
127
|
+
"openai_stream": {
|
|
128
|
+
"model": "{model}",
|
|
129
|
+
"messages": MESSAGE_PAYLOADS["openai"],
|
|
130
|
+
"max_tokens": 100,
|
|
131
|
+
"stream": True,
|
|
132
|
+
"model_class": ChatCompletionResponse,
|
|
133
|
+
"chunk_model_class": ChatCompletionChunk,
|
|
134
|
+
"api_format": "openai",
|
|
135
|
+
},
|
|
136
|
+
"openai_non_stream": {
|
|
137
|
+
"model": "{model}",
|
|
138
|
+
"messages": MESSAGE_PAYLOADS["openai"],
|
|
139
|
+
"max_tokens": 100,
|
|
140
|
+
"stream": False,
|
|
141
|
+
"model_class": ChatCompletionResponse,
|
|
142
|
+
"api_format": "openai",
|
|
143
|
+
},
|
|
144
|
+
"response_api_stream": {
|
|
145
|
+
"model": "{model}",
|
|
146
|
+
"stream": True,
|
|
147
|
+
"max_completion_tokens": 1000,
|
|
148
|
+
"input": MESSAGE_PAYLOADS["response_api"],
|
|
149
|
+
"model_class": ResponseObject,
|
|
150
|
+
"chunk_model_class": BaseStreamEvent,
|
|
151
|
+
"api_format": "responses",
|
|
152
|
+
},
|
|
153
|
+
"response_api_non_stream": {
|
|
154
|
+
"model": "{model}",
|
|
155
|
+
"stream": False,
|
|
156
|
+
"max_completion_tokens": 1000,
|
|
157
|
+
"input": MESSAGE_PAYLOADS["response_api"],
|
|
158
|
+
"model_class": ResponseObject,
|
|
159
|
+
"api_format": "responses",
|
|
160
|
+
},
|
|
161
|
+
"responses_structured_stream": {
|
|
162
|
+
"model": "{model}",
|
|
163
|
+
"max_completion_tokens": 1000,
|
|
164
|
+
"stream": True,
|
|
165
|
+
"input": MESSAGE_PAYLOADS["response_api_structured"],
|
|
166
|
+
"text": RESPONSES_STRUCTURED_TEXT_FORMAT,
|
|
167
|
+
"model_class": ResponseObject,
|
|
168
|
+
"chunk_model_class": BaseStreamEvent,
|
|
169
|
+
"accumulator_class": ResponsesAccumulator,
|
|
170
|
+
"api_format": "responses",
|
|
171
|
+
},
|
|
172
|
+
"responses_structured_non_stream": {
|
|
173
|
+
"model": "{model}",
|
|
174
|
+
"max_completion_tokens": 1000,
|
|
175
|
+
"stream": False,
|
|
176
|
+
"input": MESSAGE_PAYLOADS["response_api_structured"],
|
|
177
|
+
"text": RESPONSES_STRUCTURED_TEXT_FORMAT,
|
|
178
|
+
"model_class": ResponseObject,
|
|
179
|
+
"accumulator_class": ResponsesAccumulator,
|
|
180
|
+
"api_format": "responses",
|
|
181
|
+
},
|
|
182
|
+
"anthropic_stream": {
|
|
183
|
+
"model": "{model}",
|
|
184
|
+
"max_tokens": 1000,
|
|
185
|
+
"stream": True,
|
|
186
|
+
"messages": MESSAGE_PAYLOADS["anthropic"],
|
|
187
|
+
"model_class": MessageResponse,
|
|
188
|
+
"chunk_model_class": MessageStartEvent,
|
|
189
|
+
"api_format": "anthropic",
|
|
190
|
+
},
|
|
191
|
+
"anthropic_non_stream": {
|
|
192
|
+
"model": "{model}",
|
|
193
|
+
"max_tokens": 1000,
|
|
194
|
+
"stream": False,
|
|
195
|
+
"messages": MESSAGE_PAYLOADS["anthropic"],
|
|
196
|
+
"model_class": MessageResponse,
|
|
197
|
+
"api_format": "anthropic",
|
|
198
|
+
},
|
|
199
|
+
# Tool-enhanced requests
|
|
200
|
+
"responses_tools_stream": {
|
|
201
|
+
"model": "{model}",
|
|
202
|
+
"max_completion_tokens": 1000,
|
|
203
|
+
"stream": True,
|
|
204
|
+
"tools": CODEX_TOOLS,
|
|
205
|
+
"input": MESSAGE_PAYLOADS["responses_tools"],
|
|
206
|
+
"model_class": ResponseObject,
|
|
207
|
+
"chunk_model_class": BaseStreamEvent,
|
|
208
|
+
"accumulator_class": ResponsesAccumulator,
|
|
209
|
+
"api_format": "responses",
|
|
210
|
+
},
|
|
211
|
+
"responses_tools_non_stream": {
|
|
212
|
+
"model": "{model}",
|
|
213
|
+
"max_completion_tokens": 1000,
|
|
214
|
+
"stream": False,
|
|
215
|
+
"tools": CODEX_TOOLS,
|
|
216
|
+
"input": MESSAGE_PAYLOADS["responses_tools"],
|
|
217
|
+
"model_class": ResponseObject,
|
|
218
|
+
"accumulator_class": ResponsesAccumulator,
|
|
219
|
+
"api_format": "responses",
|
|
220
|
+
},
|
|
221
|
+
"responses_thinking_stream": {
|
|
222
|
+
"model": "{model}",
|
|
223
|
+
"max_completion_tokens": 1000,
|
|
224
|
+
"stream": True,
|
|
225
|
+
"input": MESSAGE_PAYLOADS["responses_thinking"],
|
|
226
|
+
"reasoning": {"effort": "high", "summary": "auto"},
|
|
227
|
+
"model_class": ResponseObject,
|
|
228
|
+
"chunk_model_class": BaseStreamEvent,
|
|
229
|
+
"accumulator_class": ResponsesAccumulator,
|
|
230
|
+
"api_format": "responses",
|
|
231
|
+
},
|
|
232
|
+
"responses_thinking_non_stream": {
|
|
233
|
+
"model": "{model}",
|
|
234
|
+
"max_completion_tokens": 1000,
|
|
235
|
+
"stream": False,
|
|
236
|
+
"input": MESSAGE_PAYLOADS["responses_thinking"],
|
|
237
|
+
"reasoning": {"effort": "high", "summary": "auto"},
|
|
238
|
+
"model_class": ResponseObject,
|
|
239
|
+
"accumulator_class": ResponsesAccumulator,
|
|
240
|
+
"api_format": "responses",
|
|
241
|
+
},
|
|
242
|
+
"openai_tools_stream": {
|
|
243
|
+
"model": "{model}",
|
|
244
|
+
"messages": MESSAGE_PAYLOADS["openai_tools"],
|
|
245
|
+
"max_tokens": 1000,
|
|
246
|
+
"stream": True,
|
|
247
|
+
"tools": OPENAI_TOOLS,
|
|
248
|
+
"model_class": ChatCompletionResponse,
|
|
249
|
+
"chunk_model_class": ChatCompletionChunk,
|
|
250
|
+
"accumulator_class": OpenAIAccumulator,
|
|
251
|
+
"api_format": "openai",
|
|
252
|
+
},
|
|
253
|
+
"openai_tools_non_stream": {
|
|
254
|
+
"model": "{model}",
|
|
255
|
+
"messages": MESSAGE_PAYLOADS["openai_tools"],
|
|
256
|
+
"max_tokens": 1000,
|
|
257
|
+
"stream": False,
|
|
258
|
+
"tools": OPENAI_TOOLS,
|
|
259
|
+
"model_class": ChatCompletionResponse,
|
|
260
|
+
"api_format": "openai",
|
|
261
|
+
},
|
|
262
|
+
"anthropic_tools_stream": {
|
|
263
|
+
"model": "{model}",
|
|
264
|
+
"max_tokens": 1000,
|
|
265
|
+
"stream": True,
|
|
266
|
+
"messages": MESSAGE_PAYLOADS["anthropic_tools"],
|
|
267
|
+
"tools": ANTHROPIC_TOOLS,
|
|
268
|
+
"model_class": MessageResponse,
|
|
269
|
+
"chunk_model_class": MessageStartEvent,
|
|
270
|
+
"accumulator_class": ClaudeAccumulator,
|
|
271
|
+
"api_format": "anthropic",
|
|
272
|
+
},
|
|
273
|
+
"anthropic_tools_non_stream": {
|
|
274
|
+
"model": "{model}",
|
|
275
|
+
"max_tokens": 1000,
|
|
276
|
+
"stream": False,
|
|
277
|
+
"messages": MESSAGE_PAYLOADS["anthropic_tools"],
|
|
278
|
+
"tools": ANTHROPIC_TOOLS,
|
|
279
|
+
"model_class": MessageResponse,
|
|
280
|
+
"api_format": "anthropic",
|
|
281
|
+
},
|
|
282
|
+
"messages_tools_stream": {
|
|
283
|
+
"model": "{model}",
|
|
284
|
+
"max_tokens": 1000,
|
|
285
|
+
"stream": True,
|
|
286
|
+
"messages": MESSAGE_PAYLOADS["anthropic_tools"],
|
|
287
|
+
"tools": CODEX_TOOLS,
|
|
288
|
+
"model_class": MessageResponse,
|
|
289
|
+
"chunk_model_class": MessageStartEvent,
|
|
290
|
+
"accumulator_class": ClaudeAccumulator,
|
|
291
|
+
"api_format": "responses",
|
|
292
|
+
},
|
|
293
|
+
"messages_tools_non_stream": {
|
|
294
|
+
"model": "{model}",
|
|
295
|
+
"max_tokens": 1000,
|
|
296
|
+
"stream": False,
|
|
297
|
+
"messages": MESSAGE_PAYLOADS["anthropic_tools"],
|
|
298
|
+
"tools": CODEX_TOOLS,
|
|
299
|
+
"model_class": MessageResponse,
|
|
300
|
+
"api_format": "responses",
|
|
301
|
+
},
|
|
302
|
+
# Thinking mode requests (OpenAI only)
|
|
303
|
+
"openai_thinking_stream": {
|
|
304
|
+
"model": "o3-mini",
|
|
305
|
+
"messages": MESSAGE_PAYLOADS["openai_thinking"],
|
|
306
|
+
"stream": True,
|
|
307
|
+
"temperature": 1.0,
|
|
308
|
+
"model_class": ChatCompletionResponse,
|
|
309
|
+
"chunk_model_class": ChatCompletionChunk,
|
|
310
|
+
"api_format": "openai",
|
|
311
|
+
},
|
|
312
|
+
"openai_thinking_non_stream": {
|
|
313
|
+
"model": "o3-mini",
|
|
314
|
+
"messages": MESSAGE_PAYLOADS["openai_thinking"],
|
|
315
|
+
"stream": False,
|
|
316
|
+
"temperature": 1.0,
|
|
317
|
+
"model_class": ChatCompletionResponse,
|
|
318
|
+
"api_format": "openai",
|
|
319
|
+
},
|
|
320
|
+
# Structured output requests
|
|
321
|
+
"openai_structured_stream": {
|
|
322
|
+
"model": "{model}",
|
|
323
|
+
"messages": MESSAGE_PAYLOADS["openai_structured"],
|
|
324
|
+
"max_tokens": 100,
|
|
325
|
+
"stream": True,
|
|
326
|
+
"temperature": 0.7,
|
|
327
|
+
"response_format": OPENAI_STRUCTURED_RESPONSE_FORMAT,
|
|
328
|
+
"model_class": ChatCompletionResponse,
|
|
329
|
+
"chunk_model_class": ChatCompletionChunk,
|
|
330
|
+
"api_format": "openai",
|
|
331
|
+
},
|
|
332
|
+
"openai_structured_non_stream": {
|
|
333
|
+
"model": "{model}",
|
|
334
|
+
"messages": MESSAGE_PAYLOADS["openai_structured"],
|
|
335
|
+
"max_tokens": 100,
|
|
336
|
+
"stream": False,
|
|
337
|
+
"temperature": 0.7,
|
|
338
|
+
"response_format": OPENAI_STRUCTURED_RESPONSE_FORMAT,
|
|
339
|
+
"model_class": ChatCompletionResponse,
|
|
340
|
+
"api_format": "openai",
|
|
341
|
+
},
|
|
342
|
+
}
|
|
343
|
+
|
|
344
|
+
|
|
345
|
+
class APIFormatTools(Protocol):
    """Structural interface for per-API-format tool-result continuation.

    An implementation knows how to splice executed tool results back into a
    follow-up request phrased in its own wire format.
    """

    def build_continuation_request(
        self,
        initial_request: dict[str, Any],
        original_response: dict[str, Any],
        tool_results: list[dict[str, Any]],
    ) -> dict[str, Any]:
        """Build a continuation request with tool results for this API format."""
        ...
|
|
355
|
+
|
|
356
|
+
|
|
357
|
+
class OpenAIFormatTools:
    """Handle tool result continuation for the OpenAI API format."""

    def build_continuation_request(
        self,
        initial_request: dict[str, Any],
        original_response: dict[str, Any],
        tool_results: list[dict[str, Any]],
    ) -> dict[str, Any]:
        """Append the model's tool calls and their results as chat messages.

        Returns a shallow copy of *initial_request* whose ``messages`` list is
        extended with one assistant message carrying the tool calls followed by
        one ``tool`` message per result.  The ``tools`` key is removed so the
        continuation round cannot trigger another tool call.
        """
        follow_up = dict(initial_request)

        # Collect the tool calls the model emitted, preferring the standard
        # choices[*].message.tool_calls location; fall back to a top-level
        # "tool_calls" field only when no choice carried any.
        emitted_calls: list[dict[str, Any]] = []
        for choice in original_response.get("choices", []):
            calls = choice.get("message", {}).get("tool_calls")
            if calls:
                emitted_calls.extend(calls)
        if not emitted_calls and original_response.get("tool_calls"):
            emitted_calls.extend(original_response["tool_calls"])

        assistant_turn = {
            "role": "assistant",
            "content": None,
            "tool_calls": emitted_calls,
        }

        # One "tool" role message per executed call, result serialized as JSON.
        result_turns = [
            {
                "role": "tool",
                "tool_call_id": entry["tool_call"].get("id"),
                "content": json.dumps(entry["result"]),
            }
            for entry in tool_results
        ]

        follow_up["messages"] = (
            list(initial_request.get("messages", [])) + [assistant_turn] + result_turns
        )
        follow_up.pop("tools", None)
        return follow_up
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
class AnthropicFormatTools:
    """Handle tool result continuation for the Anthropic API format."""

    def build_continuation_request(
        self,
        initial_request: dict[str, Any],
        original_response: dict[str, Any],
        tool_results: list[dict[str, Any]],
    ) -> dict[str, Any]:
        """Replay tool calls as ``tool_use``/``tool_result`` content blocks.

        Returns a shallow copy of *initial_request* with an assistant turn
        echoing each tool invocation and a user turn carrying the results
        plus a plain-text recap.  ``tools`` is dropped so the follow-up
        request cannot recurse into another tool call.
        """
        follow_up = dict(initial_request)

        # Assistant turn: one tool_use block per executed call.
        tool_use_blocks = [
            {
                "type": "tool_use",
                "id": entry["tool_call"].get("id"),
                "name": entry["tool_call"].get("name"),
                "input": entry["tool_input"],
            }
            for entry in tool_results
        ]
        assistant_turn = {
            "role": "assistant",
            "content": tool_use_blocks,
        }

        # User turn: tool_result blocks plus a human-readable recap sentence.
        result_blocks: list[dict[str, Any]] = []
        recap_parts: list[str] = []
        for entry in tool_results:
            call = entry["tool_call"]
            outcome = entry["result"]
            result_blocks.append(
                {
                    "type": "tool_result",
                    "tool_use_id": call.get("id"),
                    "content": str(outcome),
                }
            )
            recap_parts.append(
                f"Tool {call.get('name', 'unknown')} returned: {json.dumps(outcome)}"
            )
        if recap_parts:
            result_blocks.append(
                {
                    "type": "text",
                    "text": " ".join(recap_parts),
                }
            )

        turns = list(initial_request.get("messages", [])) + [assistant_turn]
        # With no tool results there are no blocks, so the user turn is omitted.
        if result_blocks:
            turns.append({"role": "user", "content": result_blocks})

        follow_up["messages"] = turns
        follow_up.pop("tools", None)
        return follow_up
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
class ResponsesFormatTools:
    """Handle tool result continuation for the Responses API format."""

    def build_continuation_request(
        self,
        initial_request: dict[str, Any],
        original_response: dict[str, Any],
        tool_results: list[dict[str, Any]],
    ) -> dict[str, Any]:
        """Build a Responses-format continuation carrying the tool results.

        Chat-style ``messages`` in the original request are first translated
        into Responses ``input`` items.  A narrated assistant turn (what was
        called) and a user turn (what each call returned, plus a request for a
        summary) are appended.  ``messages`` and ``tools`` are removed from the
        returned copy so only the ``input`` list remains.
        """
        follow_up = dict(initial_request)

        seed_input = initial_request.get("input", [])
        chat_messages = initial_request.get("messages", [])

        # When the request was phrased as chat messages rather than Responses
        # input items, convert each message into an equivalent input item.
        if not seed_input and chat_messages:
            seed_input = []
            for message in chat_messages:
                role = message["role"]
                # User text becomes input_text; everything else output_text.
                text_type = "input_text" if role == "user" else "output_text"
                body = message.get("content")
                if isinstance(body, str):
                    seed_input.append(
                        {
                            "type": "message",
                            "role": role,
                            "content": [{"type": text_type, "text": body}],
                        }
                    )
                elif isinstance(body, list):
                    converted: list[dict[str, Any]] = []
                    for block in body:
                        kind = block.get("type")
                        if kind == "tool_use":
                            converted.append(
                                {
                                    "type": "output_text",
                                    "text": f"Called {block.get('name', 'unknown')} with {block.get('input', {})}",
                                }
                            )
                        elif kind == "tool_result":
                            converted.append(
                                {
                                    "type": "input_text",
                                    "text": f"Tool {block.get('tool_use_id', '')} returned: {block.get('content', '')}",
                                }
                            )
                        else:
                            converted.append({"type": text_type, "text": str(block)})
                    if converted:
                        seed_input.append(
                            {
                                "type": "message",
                                "role": role,
                                "content": converted,
                            }
                        )

        new_input = list(seed_input)

        # Assistant turn narrating the tool calls that were made.
        call_narration = [
            f"I called {entry['tool_name']} with arguments: {json.dumps(entry['tool_input'])}"
            for entry in tool_results
        ]
        if call_narration:
            new_input.append(
                {
                    "type": "message",
                    "role": "assistant",
                    "content": [
                        {"type": "output_text", "text": " ".join(call_narration)}
                    ],
                }
            )

        # User turn reporting the results and asking for a summary.  NOTE:
        # appended unconditionally, matching the original behavior even when
        # there are no tool results.
        result_narration = [
            f"The {entry['tool_name']} function returned: {json.dumps(entry['result'])}"
            for entry in tool_results
        ]
        new_input.append(
            {
                "type": "message",
                "role": "user",
                "content": [
                    {
                        "type": "input_text",
                        "text": " ".join(result_narration)
                        + " Please provide a summary of this information.",
                    }
                ],
            }
        )

        follow_up["input"] = new_input
        follow_up.pop("messages", None)
        follow_up.pop("tools", None)
        return follow_up
|
|
602
|
+
|
|
603
|
+
|
|
604
|
+
# Registry mapping a wire-format name to its continuation-request builder.
# Keys match the "api_format" values used in REQUEST_DATA entries.
FORMAT_TOOLS: dict[str, APIFormatTools] = {
    "openai": OpenAIFormatTools(),
    "anthropic": AnthropicFormatTools(),
    "responses": ResponsesFormatTools(),
}
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
@dataclass(frozen=True)
class ProviderConfig:
    """Configuration for a provider's endpoints and capabilities."""

    # Provider identifier; also used as the key in PROVIDER_CONFIGS.
    name: str
    # URL prefix joined with a FormatConfig.endpoint_path to form an endpoint.
    base_path: str
    # Model name passed to generated EndpointTest entries for this provider.
    model: str
    # FORMAT_CONFIGS keys this provider supports; others are skipped.
    supported_formats: list[str]
    # Human-readable prefix used when composing test descriptions.
    description_prefix: str
|
|
620
|
+
|
|
621
|
+
|
|
622
|
+
@dataclass(frozen=True)
class FormatConfig:
    """Configuration mapping API format to request types and endpoint paths."""

    # Format identifier; also used as the key in FORMAT_CONFIGS.
    name: str
    # Path appended to a ProviderConfig.base_path to build the endpoint URL.
    endpoint_path: str
    # REQUEST_DATA key prefix; "_stream"/"_non_stream" is appended per test.
    request_type_base: str
    # Human-readable fragment used when composing test descriptions.
    description: str
|
|
630
|
+
|
|
631
|
+
|
|
632
|
+
# Provider matrix: each entry declares the base URL, default model, and the
# subset of FORMAT_CONFIGS the provider supports.  generate_endpoint_tests()
# crosses this with FORMAT_CONFIGS to produce ENDPOINT_TESTS.
PROVIDER_CONFIGS: dict[str, ProviderConfig] = {
    # GitHub Copilot-style provider; broadest OpenAI-format coverage.
    "copilot": ProviderConfig(
        name="copilot",
        base_path="/copilot/v1",
        model="gpt-4o",
        supported_formats=[
            "chat_completions",
            "responses",
            "messages",
            "chat_completions_tools",
            "messages_tools",
            "chat_completions_thinking",
            "chat_completions_structured",
            "responses_structured",
        ],
        description_prefix="Copilot",
    ),
    # Direct Claude API provider.
    "claude": ProviderConfig(
        name="claude",
        base_path="/claude/v1",
        model="claude-sonnet-4-20250514",
        supported_formats=[
            "chat_completions",
            "responses",
            "messages",
            "chat_completions_tools",
            "messages_tools",
            "chat_completions_structured",
            "responses_structured",
        ],
        description_prefix="Claude API",
    ),
    # Claude SDK-backed provider; narrower format support than the raw API.
    "claude_sdk": ProviderConfig(
        name="claude_sdk",
        base_path="/claude/sdk/v1",
        model="claude-sonnet-4-20250514",
        supported_formats=[
            "chat_completions",
            "responses",
            "messages",
            "chat_completions_structured",
        ],
        description_prefix="Claude SDK",
    ),
    # Codex provider; the only one exercising responses_tools/thinking.
    "codex": ProviderConfig(
        name="codex",
        base_path="/codex/v1",
        model="gpt-5",
        supported_formats=[
            "chat_completions",
            "responses",
            "messages",
            "responses_tools",
            "responses_thinking",
            "responses_structured",
            "chat_completions_tools",
            "messages_tools",
            "chat_completions_thinking",
            "chat_completions_structured",
        ],
        description_prefix="Codex",
    ),
}
|
|
695
|
+
|
|
696
|
+
|
|
697
|
+
# Per-provider stream-accumulator class for tool-call tests, pulled from each
# provider's factory.  The annotation allows None for factories that expose
# no tool accumulator.
PROVIDER_TOOL_ACCUMULATORS: dict[str, type[StreamAccumulator] | None] = {
    "codex": codex_factory.tool_accumulator_class,
    "claude": claude_api_factory.tool_accumulator_class,
    "claude_sdk": claude_sdk_factory.tool_accumulator_class,
    "copilot": copilot_factory.tool_accumulator_class,
}
|
|
703
|
+
|
|
704
|
+
|
|
705
|
+
# Format matrix: maps a format name to its endpoint path and the REQUEST_DATA
# key prefix ("_stream"/"_non_stream" is appended by generate_endpoint_tests).
FORMAT_CONFIGS: dict[str, FormatConfig] = {
    "chat_completions": FormatConfig(
        name="chat_completions",
        endpoint_path="/chat/completions",
        request_type_base="openai",
        description="chat completions",
    ),
    "responses": FormatConfig(
        name="responses",
        endpoint_path="/responses",
        # NOTE(review): base "response_api" breaks the "responses_*" naming
        # pattern of the sibling entries — confirm REQUEST_DATA defines
        # response_api_stream / response_api_non_stream keys.
        request_type_base="response_api",
        description="responses",
    ),
    "responses_tools": FormatConfig(
        name="responses_tools",
        endpoint_path="/responses",
        request_type_base="responses_tools",
        description="responses with tools",
    ),
    "responses_thinking": FormatConfig(
        name="responses_thinking",
        endpoint_path="/responses",
        request_type_base="responses_thinking",
        description="responses with thinking",
    ),
    "responses_structured": FormatConfig(
        name="responses_structured",
        endpoint_path="/responses",
        request_type_base="responses_structured",
        description="responses structured",
    ),
    "messages": FormatConfig(
        name="messages",
        endpoint_path="/messages",
        request_type_base="anthropic",
        description="messages",
    ),
    "chat_completions_tools": FormatConfig(
        name="chat_completions_tools",
        endpoint_path="/chat/completions",
        request_type_base="openai_tools",
        description="chat completions with tools",
    ),
    "messages_tools": FormatConfig(
        name="messages_tools",
        endpoint_path="/messages",
        request_type_base="anthropic_tools",
        description="messages with tools",
    ),
    "chat_completions_thinking": FormatConfig(
        name="chat_completions_thinking",
        endpoint_path="/chat/completions",
        request_type_base="openai_thinking",
        description="chat completions with thinking",
    ),
    "chat_completions_structured": FormatConfig(
        name="chat_completions_structured",
        endpoint_path="/chat/completions",
        request_type_base="openai_structured",
        description="chat completions structured",
    ),
}
|
|
767
|
+
|
|
768
|
+
|
|
769
|
+
def generate_endpoint_tests() -> list[EndpointTest]:
    """Generate all endpoint test permutations from provider and format configurations.

    For every provider x supported format x {streaming, non-streaming}
    combination, emits an EndpointTest — skipping formats without a
    FORMAT_CONFIGS entry and combinations whose request template is
    missing from REQUEST_DATA.
    """
    generated: list[EndpointTest] = []

    for provider_key, provider in PROVIDER_CONFIGS.items():
        for format_name in provider.supported_formats:
            fmt = FORMAT_CONFIGS.get(format_name)
            if not fmt:
                # Provider advertises a format we have no config for.
                continue

            endpoint = f"{provider.base_path}{fmt.endpoint_path}"

            for streaming in (True, False):
                request_key = fmt.request_type_base + (
                    "_stream" if streaming else "_non_stream"
                )
                if request_key not in REQUEST_DATA:
                    # No request template for this variant.
                    continue

                # Non-streaming test names carry no suffix.
                name = f"{provider_key}_{fmt.name}" + (
                    "_stream" if streaming else ""
                )
                mode = "streaming" if streaming else "non-streaming"
                generated.append(
                    EndpointTest(
                        name=name,
                        endpoint=endpoint,
                        stream=streaming,
                        request=request_key,
                        model=provider.model,
                        description=f"{provider.description_prefix} {fmt.description} {mode}",
                    )
                )

    return generated
|
|
807
|
+
|
|
808
|
+
|
|
809
|
+
# Materialized once at import time; add_provider()/add_format() reassign it.
ENDPOINT_TESTS: list[EndpointTest] = generate_endpoint_tests()
|
|
810
|
+
|
|
811
|
+
|
|
812
|
+
def add_provider(
    name: str,
    base_path: str,
    model: str,
    supported_formats: list[str],
    description_prefix: str,
) -> None:
    """Add a new provider configuration and regenerate endpoint tests.

    Stores a ProviderConfig under *name* in PROVIDER_CONFIGS (replacing any
    existing entry with that key), then rebuilds the module-level
    ENDPOINT_TESTS list so the new provider's permutations are included.
    """
    global ENDPOINT_TESTS

    config = ProviderConfig(
        name=name,
        base_path=base_path,
        model=model,
        supported_formats=supported_formats,
        description_prefix=description_prefix,
    )
    PROVIDER_CONFIGS[name] = config
    ENDPOINT_TESTS = generate_endpoint_tests()
|
|
832
|
+
|
|
833
|
+
|
|
834
|
+
def add_format(
    name: str,
    endpoint_path: str,
    request_type_base: str,
    description: str,
) -> None:
    """Add a new format configuration and regenerate endpoint tests.

    Stores a FormatConfig under *name* in FORMAT_CONFIGS (replacing any
    existing entry with that key), then rebuilds the module-level
    ENDPOINT_TESTS list so providers supporting the format pick it up.
    """
    global ENDPOINT_TESTS

    config = FormatConfig(
        name=name,
        endpoint_path=endpoint_path,
        request_type_base=request_type_base,
        description=description,
    )
    FORMAT_CONFIGS[name] = config
    ENDPOINT_TESTS = generate_endpoint_tests()
|
|
852
|
+
|
|
853
|
+
|
|
854
|
+
def list_available_tests() -> str:
    """Generate a formatted list of available tests for help text.

    Returns a newline-joined string: a header followed by one numbered,
    column-aligned row per entry in ENDPOINT_TESTS.
    """
    rows = [
        f"  {index:2d}. {test.name:<30} - {test.description}"
        for index, test in enumerate(ENDPOINT_TESTS, start=1)
    ]
    return "\n".join(["Available tests:", *rows])
|
|
861
|
+
|
|
862
|
+
|
|
863
|
+
# Public API of this fixtures module: request/response templates, the
# generated endpoint-test matrix, and helpers for extending it.
__all__ = [
    "MESSAGE_PAYLOADS",
    "REQUEST_DATA",
    "FORMAT_TOOLS",
    "PROVIDER_TOOL_ACCUMULATORS",
    "ENDPOINT_TESTS",
    "list_available_tests",
    "add_provider",
    "add_format",
    "OPENAI_STRUCTURED_RESPONSE_FORMAT",
    "RESPONSES_STRUCTURED_TEXT_FORMAT",
]
|