ccproxy-api 0.1.7__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/api/__init__.py +1 -15
- ccproxy/api/app.py +434 -219
- ccproxy/api/bootstrap.py +30 -0
- ccproxy/api/decorators.py +85 -0
- ccproxy/api/dependencies.py +144 -168
- ccproxy/api/format_validation.py +54 -0
- ccproxy/api/middleware/cors.py +6 -3
- ccproxy/api/middleware/errors.py +388 -524
- ccproxy/api/middleware/hooks.py +563 -0
- ccproxy/api/middleware/normalize_headers.py +59 -0
- ccproxy/api/middleware/request_id.py +35 -16
- ccproxy/api/middleware/streaming_hooks.py +292 -0
- ccproxy/api/routes/__init__.py +5 -14
- ccproxy/api/routes/health.py +39 -672
- ccproxy/api/routes/plugins.py +277 -0
- ccproxy/auth/__init__.py +2 -19
- ccproxy/auth/bearer.py +25 -15
- ccproxy/auth/dependencies.py +123 -157
- ccproxy/auth/exceptions.py +0 -12
- ccproxy/auth/manager.py +35 -49
- ccproxy/auth/managers/__init__.py +10 -0
- ccproxy/auth/managers/base.py +523 -0
- ccproxy/auth/managers/base_enhanced.py +63 -0
- ccproxy/auth/managers/token_snapshot.py +77 -0
- ccproxy/auth/models/base.py +65 -0
- ccproxy/auth/models/credentials.py +40 -0
- ccproxy/auth/oauth/__init__.py +4 -18
- ccproxy/auth/oauth/base.py +533 -0
- ccproxy/auth/oauth/cli_errors.py +37 -0
- ccproxy/auth/oauth/flows.py +430 -0
- ccproxy/auth/oauth/protocol.py +366 -0
- ccproxy/auth/oauth/registry.py +408 -0
- ccproxy/auth/oauth/router.py +396 -0
- ccproxy/auth/oauth/routes.py +186 -113
- ccproxy/auth/oauth/session.py +151 -0
- ccproxy/auth/oauth/templates.py +342 -0
- ccproxy/auth/storage/__init__.py +2 -5
- ccproxy/auth/storage/base.py +279 -5
- ccproxy/auth/storage/generic.py +134 -0
- ccproxy/cli/__init__.py +1 -2
- ccproxy/cli/_settings_help.py +351 -0
- ccproxy/cli/commands/auth.py +1519 -793
- ccproxy/cli/commands/config/commands.py +209 -276
- ccproxy/cli/commands/plugins.py +669 -0
- ccproxy/cli/commands/serve.py +75 -810
- ccproxy/cli/commands/status.py +254 -0
- ccproxy/cli/decorators.py +83 -0
- ccproxy/cli/helpers.py +22 -60
- ccproxy/cli/main.py +359 -10
- ccproxy/cli/options/claude_options.py +0 -25
- ccproxy/config/__init__.py +7 -11
- ccproxy/config/core.py +227 -0
- ccproxy/config/env_generator.py +232 -0
- ccproxy/config/runtime.py +67 -0
- ccproxy/config/security.py +36 -3
- ccproxy/config/settings.py +382 -441
- ccproxy/config/toml_generator.py +299 -0
- ccproxy/config/utils.py +452 -0
- ccproxy/core/__init__.py +7 -271
- ccproxy/{_version.py → core/_version.py} +16 -3
- ccproxy/core/async_task_manager.py +516 -0
- ccproxy/core/async_utils.py +47 -14
- ccproxy/core/auth/__init__.py +6 -0
- ccproxy/core/constants.py +16 -50
- ccproxy/core/errors.py +53 -0
- ccproxy/core/id_utils.py +20 -0
- ccproxy/core/interfaces.py +16 -123
- ccproxy/core/logging.py +473 -18
- ccproxy/core/plugins/__init__.py +77 -0
- ccproxy/core/plugins/cli_discovery.py +211 -0
- ccproxy/core/plugins/declaration.py +455 -0
- ccproxy/core/plugins/discovery.py +604 -0
- ccproxy/core/plugins/factories.py +967 -0
- ccproxy/core/plugins/hooks/__init__.py +30 -0
- ccproxy/core/plugins/hooks/base.py +58 -0
- ccproxy/core/plugins/hooks/events.py +46 -0
- ccproxy/core/plugins/hooks/implementations/__init__.py +16 -0
- ccproxy/core/plugins/hooks/implementations/formatters/__init__.py +11 -0
- ccproxy/core/plugins/hooks/implementations/formatters/json.py +552 -0
- ccproxy/core/plugins/hooks/implementations/formatters/raw.py +370 -0
- ccproxy/core/plugins/hooks/implementations/http_tracer.py +431 -0
- ccproxy/core/plugins/hooks/layers.py +44 -0
- ccproxy/core/plugins/hooks/manager.py +186 -0
- ccproxy/core/plugins/hooks/registry.py +139 -0
- ccproxy/core/plugins/hooks/thread_manager.py +203 -0
- ccproxy/core/plugins/hooks/types.py +22 -0
- ccproxy/core/plugins/interfaces.py +416 -0
- ccproxy/core/plugins/loader.py +166 -0
- ccproxy/core/plugins/middleware.py +233 -0
- ccproxy/core/plugins/models.py +59 -0
- ccproxy/core/plugins/protocol.py +180 -0
- ccproxy/core/plugins/runtime.py +519 -0
- ccproxy/{observability/context.py → core/request_context.py} +137 -94
- ccproxy/core/status_report.py +211 -0
- ccproxy/core/transformers.py +13 -8
- ccproxy/data/claude_headers_fallback.json +540 -19
- ccproxy/data/codex_headers_fallback.json +114 -7
- ccproxy/http/__init__.py +30 -0
- ccproxy/http/base.py +95 -0
- ccproxy/http/client.py +323 -0
- ccproxy/http/hooks.py +642 -0
- ccproxy/http/pool.py +279 -0
- ccproxy/llms/formatters/__init__.py +7 -0
- ccproxy/llms/formatters/anthropic_to_openai/__init__.py +55 -0
- ccproxy/llms/formatters/anthropic_to_openai/errors.py +65 -0
- ccproxy/llms/formatters/anthropic_to_openai/requests.py +356 -0
- ccproxy/llms/formatters/anthropic_to_openai/responses.py +153 -0
- ccproxy/llms/formatters/anthropic_to_openai/streams.py +1546 -0
- ccproxy/llms/formatters/base.py +140 -0
- ccproxy/llms/formatters/base_model.py +33 -0
- ccproxy/llms/formatters/common/__init__.py +51 -0
- ccproxy/llms/formatters/common/identifiers.py +48 -0
- ccproxy/llms/formatters/common/streams.py +254 -0
- ccproxy/llms/formatters/common/thinking.py +74 -0
- ccproxy/llms/formatters/common/usage.py +135 -0
- ccproxy/llms/formatters/constants.py +55 -0
- ccproxy/llms/formatters/context.py +116 -0
- ccproxy/llms/formatters/mapping.py +33 -0
- ccproxy/llms/formatters/openai_to_anthropic/__init__.py +55 -0
- ccproxy/llms/formatters/openai_to_anthropic/_helpers.py +141 -0
- ccproxy/llms/formatters/openai_to_anthropic/errors.py +53 -0
- ccproxy/llms/formatters/openai_to_anthropic/requests.py +674 -0
- ccproxy/llms/formatters/openai_to_anthropic/responses.py +285 -0
- ccproxy/llms/formatters/openai_to_anthropic/streams.py +530 -0
- ccproxy/llms/formatters/openai_to_openai/__init__.py +53 -0
- ccproxy/llms/formatters/openai_to_openai/_helpers.py +325 -0
- ccproxy/llms/formatters/openai_to_openai/errors.py +6 -0
- ccproxy/llms/formatters/openai_to_openai/requests.py +388 -0
- ccproxy/llms/formatters/openai_to_openai/responses.py +594 -0
- ccproxy/llms/formatters/openai_to_openai/streams.py +1832 -0
- ccproxy/llms/formatters/utils.py +306 -0
- ccproxy/llms/models/__init__.py +9 -0
- ccproxy/llms/models/anthropic.py +619 -0
- ccproxy/llms/models/openai.py +844 -0
- ccproxy/llms/streaming/__init__.py +26 -0
- ccproxy/llms/streaming/accumulators.py +1074 -0
- ccproxy/llms/streaming/formatters.py +251 -0
- ccproxy/{adapters/openai/streaming.py → llms/streaming/processors.py} +193 -240
- ccproxy/models/__init__.py +8 -159
- ccproxy/models/detection.py +92 -193
- ccproxy/models/provider.py +75 -0
- ccproxy/plugins/access_log/README.md +32 -0
- ccproxy/plugins/access_log/__init__.py +20 -0
- ccproxy/plugins/access_log/config.py +33 -0
- ccproxy/plugins/access_log/formatter.py +126 -0
- ccproxy/plugins/access_log/hook.py +763 -0
- ccproxy/plugins/access_log/logger.py +254 -0
- ccproxy/plugins/access_log/plugin.py +137 -0
- ccproxy/plugins/access_log/writer.py +109 -0
- ccproxy/plugins/analytics/README.md +24 -0
- ccproxy/plugins/analytics/__init__.py +1 -0
- ccproxy/plugins/analytics/config.py +5 -0
- ccproxy/plugins/analytics/ingest.py +85 -0
- ccproxy/plugins/analytics/models.py +97 -0
- ccproxy/plugins/analytics/plugin.py +121 -0
- ccproxy/plugins/analytics/routes.py +163 -0
- ccproxy/plugins/analytics/service.py +284 -0
- ccproxy/plugins/claude_api/README.md +29 -0
- ccproxy/plugins/claude_api/__init__.py +10 -0
- ccproxy/plugins/claude_api/adapter.py +829 -0
- ccproxy/plugins/claude_api/config.py +52 -0
- ccproxy/plugins/claude_api/detection_service.py +461 -0
- ccproxy/plugins/claude_api/health.py +175 -0
- ccproxy/plugins/claude_api/hooks.py +284 -0
- ccproxy/plugins/claude_api/models.py +256 -0
- ccproxy/plugins/claude_api/plugin.py +298 -0
- ccproxy/plugins/claude_api/routes.py +118 -0
- ccproxy/plugins/claude_api/streaming_metrics.py +68 -0
- ccproxy/plugins/claude_api/tasks.py +84 -0
- ccproxy/plugins/claude_sdk/README.md +35 -0
- ccproxy/plugins/claude_sdk/__init__.py +80 -0
- ccproxy/plugins/claude_sdk/adapter.py +749 -0
- ccproxy/plugins/claude_sdk/auth.py +57 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/client.py +63 -39
- ccproxy/plugins/claude_sdk/config.py +210 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/converter.py +6 -6
- ccproxy/plugins/claude_sdk/detection_service.py +163 -0
- ccproxy/{services/claude_sdk_service.py → plugins/claude_sdk/handler.py} +123 -304
- ccproxy/plugins/claude_sdk/health.py +113 -0
- ccproxy/plugins/claude_sdk/hooks.py +115 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/manager.py +42 -32
- ccproxy/{claude_sdk → plugins/claude_sdk}/message_queue.py +8 -8
- ccproxy/{models/claude_sdk.py → plugins/claude_sdk/models.py} +64 -16
- ccproxy/plugins/claude_sdk/options.py +154 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/parser.py +23 -5
- ccproxy/plugins/claude_sdk/plugin.py +269 -0
- ccproxy/plugins/claude_sdk/routes.py +104 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/session_client.py +124 -12
- ccproxy/plugins/claude_sdk/session_pool.py +700 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_handle.py +48 -43
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_worker.py +22 -18
- ccproxy/{claude_sdk → plugins/claude_sdk}/streaming.py +50 -16
- ccproxy/plugins/claude_sdk/tasks.py +97 -0
- ccproxy/plugins/claude_shared/README.md +18 -0
- ccproxy/plugins/claude_shared/__init__.py +12 -0
- ccproxy/plugins/claude_shared/model_defaults.py +171 -0
- ccproxy/plugins/codex/README.md +35 -0
- ccproxy/plugins/codex/__init__.py +6 -0
- ccproxy/plugins/codex/adapter.py +635 -0
- ccproxy/{config/codex.py → plugins/codex/config.py} +78 -12
- ccproxy/plugins/codex/detection_service.py +544 -0
- ccproxy/plugins/codex/health.py +162 -0
- ccproxy/plugins/codex/hooks.py +263 -0
- ccproxy/plugins/codex/model_defaults.py +39 -0
- ccproxy/plugins/codex/models.py +263 -0
- ccproxy/plugins/codex/plugin.py +275 -0
- ccproxy/plugins/codex/routes.py +129 -0
- ccproxy/plugins/codex/streaming_metrics.py +324 -0
- ccproxy/plugins/codex/tasks.py +106 -0
- ccproxy/plugins/codex/utils/__init__.py +1 -0
- ccproxy/plugins/codex/utils/sse_parser.py +106 -0
- ccproxy/plugins/command_replay/README.md +34 -0
- ccproxy/plugins/command_replay/__init__.py +17 -0
- ccproxy/plugins/command_replay/config.py +133 -0
- ccproxy/plugins/command_replay/formatter.py +432 -0
- ccproxy/plugins/command_replay/hook.py +294 -0
- ccproxy/plugins/command_replay/plugin.py +161 -0
- ccproxy/plugins/copilot/README.md +39 -0
- ccproxy/plugins/copilot/__init__.py +11 -0
- ccproxy/plugins/copilot/adapter.py +465 -0
- ccproxy/plugins/copilot/config.py +155 -0
- ccproxy/plugins/copilot/data/copilot_fallback.json +41 -0
- ccproxy/plugins/copilot/detection_service.py +255 -0
- ccproxy/plugins/copilot/manager.py +275 -0
- ccproxy/plugins/copilot/model_defaults.py +284 -0
- ccproxy/plugins/copilot/models.py +148 -0
- ccproxy/plugins/copilot/oauth/__init__.py +16 -0
- ccproxy/plugins/copilot/oauth/client.py +494 -0
- ccproxy/plugins/copilot/oauth/models.py +385 -0
- ccproxy/plugins/copilot/oauth/provider.py +602 -0
- ccproxy/plugins/copilot/oauth/storage.py +170 -0
- ccproxy/plugins/copilot/plugin.py +360 -0
- ccproxy/plugins/copilot/routes.py +294 -0
- ccproxy/plugins/credential_balancer/README.md +124 -0
- ccproxy/plugins/credential_balancer/__init__.py +6 -0
- ccproxy/plugins/credential_balancer/config.py +270 -0
- ccproxy/plugins/credential_balancer/factory.py +415 -0
- ccproxy/plugins/credential_balancer/hook.py +51 -0
- ccproxy/plugins/credential_balancer/manager.py +587 -0
- ccproxy/plugins/credential_balancer/plugin.py +146 -0
- ccproxy/plugins/dashboard/README.md +25 -0
- ccproxy/plugins/dashboard/__init__.py +1 -0
- ccproxy/plugins/dashboard/config.py +8 -0
- ccproxy/plugins/dashboard/plugin.py +71 -0
- ccproxy/plugins/dashboard/routes.py +67 -0
- ccproxy/plugins/docker/README.md +32 -0
- ccproxy/{docker → plugins/docker}/__init__.py +3 -0
- ccproxy/{docker → plugins/docker}/adapter.py +108 -10
- ccproxy/plugins/docker/config.py +82 -0
- ccproxy/{docker → plugins/docker}/docker_path.py +4 -3
- ccproxy/{docker → plugins/docker}/middleware.py +2 -2
- ccproxy/plugins/docker/plugin.py +198 -0
- ccproxy/{docker → plugins/docker}/stream_process.py +3 -3
- ccproxy/plugins/duckdb_storage/README.md +26 -0
- ccproxy/plugins/duckdb_storage/__init__.py +1 -0
- ccproxy/plugins/duckdb_storage/config.py +22 -0
- ccproxy/plugins/duckdb_storage/plugin.py +128 -0
- ccproxy/plugins/duckdb_storage/routes.py +51 -0
- ccproxy/plugins/duckdb_storage/storage.py +633 -0
- ccproxy/plugins/max_tokens/README.md +38 -0
- ccproxy/plugins/max_tokens/__init__.py +12 -0
- ccproxy/plugins/max_tokens/adapter.py +235 -0
- ccproxy/plugins/max_tokens/config.py +86 -0
- ccproxy/plugins/max_tokens/models.py +53 -0
- ccproxy/plugins/max_tokens/plugin.py +200 -0
- ccproxy/plugins/max_tokens/service.py +271 -0
- ccproxy/plugins/max_tokens/token_limits.json +54 -0
- ccproxy/plugins/metrics/README.md +35 -0
- ccproxy/plugins/metrics/__init__.py +10 -0
- ccproxy/{observability/metrics.py → plugins/metrics/collector.py} +20 -153
- ccproxy/plugins/metrics/config.py +85 -0
- ccproxy/plugins/metrics/grafana/dashboards/ccproxy-dashboard.json +1720 -0
- ccproxy/plugins/metrics/hook.py +403 -0
- ccproxy/plugins/metrics/plugin.py +268 -0
- ccproxy/{observability → plugins/metrics}/pushgateway.py +57 -59
- ccproxy/plugins/metrics/routes.py +107 -0
- ccproxy/plugins/metrics/tasks.py +117 -0
- ccproxy/plugins/oauth_claude/README.md +35 -0
- ccproxy/plugins/oauth_claude/__init__.py +14 -0
- ccproxy/plugins/oauth_claude/client.py +270 -0
- ccproxy/plugins/oauth_claude/config.py +84 -0
- ccproxy/plugins/oauth_claude/manager.py +482 -0
- ccproxy/plugins/oauth_claude/models.py +266 -0
- ccproxy/plugins/oauth_claude/plugin.py +149 -0
- ccproxy/plugins/oauth_claude/provider.py +571 -0
- ccproxy/plugins/oauth_claude/storage.py +212 -0
- ccproxy/plugins/oauth_codex/README.md +38 -0
- ccproxy/plugins/oauth_codex/__init__.py +14 -0
- ccproxy/plugins/oauth_codex/client.py +224 -0
- ccproxy/plugins/oauth_codex/config.py +95 -0
- ccproxy/plugins/oauth_codex/manager.py +256 -0
- ccproxy/plugins/oauth_codex/models.py +239 -0
- ccproxy/plugins/oauth_codex/plugin.py +146 -0
- ccproxy/plugins/oauth_codex/provider.py +574 -0
- ccproxy/plugins/oauth_codex/storage.py +92 -0
- ccproxy/plugins/permissions/README.md +28 -0
- ccproxy/plugins/permissions/__init__.py +22 -0
- ccproxy/plugins/permissions/config.py +28 -0
- ccproxy/{cli/commands/permission_handler.py → plugins/permissions/handlers/cli.py} +49 -25
- ccproxy/plugins/permissions/handlers/protocol.py +33 -0
- ccproxy/plugins/permissions/handlers/terminal.py +675 -0
- ccproxy/{api/routes → plugins/permissions}/mcp.py +34 -7
- ccproxy/{models/permissions.py → plugins/permissions/models.py} +65 -1
- ccproxy/plugins/permissions/plugin.py +153 -0
- ccproxy/{api/routes/permissions.py → plugins/permissions/routes.py} +20 -16
- ccproxy/{api/services/permission_service.py → plugins/permissions/service.py} +65 -11
- ccproxy/{api → plugins/permissions}/ui/permission_handler_protocol.py +1 -1
- ccproxy/{api → plugins/permissions}/ui/terminal_permission_handler.py +66 -10
- ccproxy/plugins/pricing/README.md +34 -0
- ccproxy/plugins/pricing/__init__.py +6 -0
- ccproxy/{pricing → plugins/pricing}/cache.py +7 -6
- ccproxy/{config/pricing.py → plugins/pricing/config.py} +32 -6
- ccproxy/plugins/pricing/exceptions.py +35 -0
- ccproxy/plugins/pricing/loader.py +440 -0
- ccproxy/{pricing → plugins/pricing}/models.py +13 -23
- ccproxy/plugins/pricing/plugin.py +169 -0
- ccproxy/plugins/pricing/service.py +191 -0
- ccproxy/plugins/pricing/tasks.py +300 -0
- ccproxy/{pricing → plugins/pricing}/updater.py +86 -72
- ccproxy/plugins/pricing/utils.py +99 -0
- ccproxy/plugins/request_tracer/README.md +40 -0
- ccproxy/plugins/request_tracer/__init__.py +7 -0
- ccproxy/plugins/request_tracer/config.py +120 -0
- ccproxy/plugins/request_tracer/hook.py +415 -0
- ccproxy/plugins/request_tracer/plugin.py +255 -0
- ccproxy/scheduler/__init__.py +2 -14
- ccproxy/scheduler/core.py +26 -41
- ccproxy/scheduler/manager.py +61 -105
- ccproxy/scheduler/registry.py +6 -32
- ccproxy/scheduler/tasks.py +268 -276
- ccproxy/services/__init__.py +0 -1
- ccproxy/services/adapters/__init__.py +11 -0
- ccproxy/services/adapters/base.py +123 -0
- ccproxy/services/adapters/chain_composer.py +88 -0
- ccproxy/services/adapters/chain_validation.py +44 -0
- ccproxy/services/adapters/chat_accumulator.py +200 -0
- ccproxy/services/adapters/delta_utils.py +142 -0
- ccproxy/services/adapters/format_adapter.py +136 -0
- ccproxy/services/adapters/format_context.py +11 -0
- ccproxy/services/adapters/format_registry.py +158 -0
- ccproxy/services/adapters/http_adapter.py +1045 -0
- ccproxy/services/adapters/mock_adapter.py +118 -0
- ccproxy/services/adapters/protocols.py +35 -0
- ccproxy/services/adapters/simple_converters.py +571 -0
- ccproxy/services/auth_registry.py +180 -0
- ccproxy/services/cache/__init__.py +6 -0
- ccproxy/services/cache/response_cache.py +261 -0
- ccproxy/services/cli_detection.py +437 -0
- ccproxy/services/config/__init__.py +6 -0
- ccproxy/services/config/proxy_configuration.py +111 -0
- ccproxy/services/container.py +256 -0
- ccproxy/services/factories.py +380 -0
- ccproxy/services/handler_config.py +76 -0
- ccproxy/services/interfaces.py +298 -0
- ccproxy/services/mocking/__init__.py +6 -0
- ccproxy/services/mocking/mock_handler.py +291 -0
- ccproxy/services/tracing/__init__.py +7 -0
- ccproxy/services/tracing/interfaces.py +61 -0
- ccproxy/services/tracing/null_tracer.py +57 -0
- ccproxy/streaming/__init__.py +23 -0
- ccproxy/streaming/buffer.py +1056 -0
- ccproxy/streaming/deferred.py +897 -0
- ccproxy/streaming/handler.py +117 -0
- ccproxy/streaming/interfaces.py +77 -0
- ccproxy/streaming/simple_adapter.py +39 -0
- ccproxy/streaming/sse.py +109 -0
- ccproxy/streaming/sse_parser.py +127 -0
- ccproxy/templates/__init__.py +6 -0
- ccproxy/templates/plugin_scaffold.py +695 -0
- ccproxy/testing/endpoints/__init__.py +33 -0
- ccproxy/testing/endpoints/cli.py +215 -0
- ccproxy/testing/endpoints/config.py +874 -0
- ccproxy/testing/endpoints/console.py +57 -0
- ccproxy/testing/endpoints/models.py +100 -0
- ccproxy/testing/endpoints/runner.py +1903 -0
- ccproxy/testing/endpoints/tools.py +308 -0
- ccproxy/testing/mock_responses.py +70 -1
- ccproxy/testing/response_handlers.py +20 -0
- ccproxy/utils/__init__.py +0 -6
- ccproxy/utils/binary_resolver.py +476 -0
- ccproxy/utils/caching.py +327 -0
- ccproxy/utils/cli_logging.py +101 -0
- ccproxy/utils/command_line.py +251 -0
- ccproxy/utils/headers.py +228 -0
- ccproxy/utils/model_mapper.py +120 -0
- ccproxy/utils/startup_helpers.py +68 -446
- ccproxy/utils/version_checker.py +273 -6
- ccproxy_api-0.2.0.dist-info/METADATA +212 -0
- ccproxy_api-0.2.0.dist-info/RECORD +417 -0
- {ccproxy_api-0.1.7.dist-info → ccproxy_api-0.2.0.dist-info}/WHEEL +1 -1
- ccproxy_api-0.2.0.dist-info/entry_points.txt +24 -0
- ccproxy/__init__.py +0 -4
- ccproxy/adapters/__init__.py +0 -11
- ccproxy/adapters/base.py +0 -80
- ccproxy/adapters/codex/__init__.py +0 -11
- ccproxy/adapters/openai/__init__.py +0 -42
- ccproxy/adapters/openai/adapter.py +0 -953
- ccproxy/adapters/openai/models.py +0 -412
- ccproxy/adapters/openai/response_adapter.py +0 -355
- ccproxy/adapters/openai/response_models.py +0 -178
- ccproxy/api/middleware/headers.py +0 -49
- ccproxy/api/middleware/logging.py +0 -180
- ccproxy/api/middleware/request_content_logging.py +0 -297
- ccproxy/api/middleware/server_header.py +0 -58
- ccproxy/api/responses.py +0 -89
- ccproxy/api/routes/claude.py +0 -371
- ccproxy/api/routes/codex.py +0 -1251
- ccproxy/api/routes/metrics.py +0 -1029
- ccproxy/api/routes/proxy.py +0 -211
- ccproxy/api/services/__init__.py +0 -6
- ccproxy/auth/conditional.py +0 -84
- ccproxy/auth/credentials_adapter.py +0 -93
- ccproxy/auth/models.py +0 -118
- ccproxy/auth/oauth/models.py +0 -48
- ccproxy/auth/openai/__init__.py +0 -13
- ccproxy/auth/openai/credentials.py +0 -166
- ccproxy/auth/openai/oauth_client.py +0 -334
- ccproxy/auth/openai/storage.py +0 -184
- ccproxy/auth/storage/json_file.py +0 -158
- ccproxy/auth/storage/keyring.py +0 -189
- ccproxy/claude_sdk/__init__.py +0 -18
- ccproxy/claude_sdk/options.py +0 -194
- ccproxy/claude_sdk/session_pool.py +0 -550
- ccproxy/cli/docker/__init__.py +0 -34
- ccproxy/cli/docker/adapter_factory.py +0 -157
- ccproxy/cli/docker/params.py +0 -274
- ccproxy/config/auth.py +0 -153
- ccproxy/config/claude.py +0 -348
- ccproxy/config/cors.py +0 -79
- ccproxy/config/discovery.py +0 -95
- ccproxy/config/docker_settings.py +0 -264
- ccproxy/config/observability.py +0 -158
- ccproxy/config/reverse_proxy.py +0 -31
- ccproxy/config/scheduler.py +0 -108
- ccproxy/config/server.py +0 -86
- ccproxy/config/validators.py +0 -231
- ccproxy/core/codex_transformers.py +0 -389
- ccproxy/core/http.py +0 -328
- ccproxy/core/http_transformers.py +0 -812
- ccproxy/core/proxy.py +0 -143
- ccproxy/core/validators.py +0 -288
- ccproxy/models/errors.py +0 -42
- ccproxy/models/messages.py +0 -269
- ccproxy/models/requests.py +0 -107
- ccproxy/models/responses.py +0 -270
- ccproxy/models/types.py +0 -102
- ccproxy/observability/__init__.py +0 -51
- ccproxy/observability/access_logger.py +0 -457
- ccproxy/observability/sse_events.py +0 -303
- ccproxy/observability/stats_printer.py +0 -753
- ccproxy/observability/storage/__init__.py +0 -1
- ccproxy/observability/storage/duckdb_simple.py +0 -677
- ccproxy/observability/storage/models.py +0 -70
- ccproxy/observability/streaming_response.py +0 -107
- ccproxy/pricing/__init__.py +0 -19
- ccproxy/pricing/loader.py +0 -251
- ccproxy/services/claude_detection_service.py +0 -243
- ccproxy/services/codex_detection_service.py +0 -252
- ccproxy/services/credentials/__init__.py +0 -55
- ccproxy/services/credentials/config.py +0 -105
- ccproxy/services/credentials/manager.py +0 -561
- ccproxy/services/credentials/oauth_client.py +0 -481
- ccproxy/services/proxy_service.py +0 -1827
- ccproxy/static/.keep +0 -0
- ccproxy/utils/cost_calculator.py +0 -210
- ccproxy/utils/disconnection_monitor.py +0 -83
- ccproxy/utils/model_mapping.py +0 -199
- ccproxy/utils/models_provider.py +0 -150
- ccproxy/utils/simple_request_logger.py +0 -284
- ccproxy/utils/streaming_metrics.py +0 -199
- ccproxy_api-0.1.7.dist-info/METADATA +0 -615
- ccproxy_api-0.1.7.dist-info/RECORD +0 -191
- ccproxy_api-0.1.7.dist-info/entry_points.txt +0 -4
- /ccproxy/{api/middleware/auth.py → auth/models/__init__.py} +0 -0
- /ccproxy/{claude_sdk → plugins/claude_sdk}/exceptions.py +0 -0
- /ccproxy/{docker → plugins/docker}/models.py +0 -0
- /ccproxy/{docker → plugins/docker}/protocol.py +0 -0
- /ccproxy/{docker → plugins/docker}/validators.py +0 -0
- /ccproxy/{auth/oauth/storage.py → plugins/permissions/handlers/__init__.py} +0 -0
- /ccproxy/{api → plugins/permissions}/ui/__init__.py +0 -0
- {ccproxy_api-0.1.7.dist-info → ccproxy_api-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,844 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Pydantic V2 models for OpenAI API endpoints based on the provided reference.
|
|
3
|
+
|
|
4
|
+
This module contains data structures for:
|
|
5
|
+
- /v1/chat/completions (including streaming)
|
|
6
|
+
- /v1/embeddings
|
|
7
|
+
- /v1/models
|
|
8
|
+
- /v1/responses (including streaming)
|
|
9
|
+
- Common Error structures
|
|
10
|
+
|
|
11
|
+
The models are defined using modern Python 3.11 type hints and Pydantic V2 best practices.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
import uuid
|
|
15
|
+
from typing import Any, Literal
|
|
16
|
+
|
|
17
|
+
from pydantic import Field, RootModel, field_validator, model_validator
|
|
18
|
+
|
|
19
|
+
from ccproxy.llms.formatters import LlmBaseModel
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# ==============================================================================
|
|
23
|
+
# Error Models
|
|
24
|
+
# ==============================================================================
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class ErrorDetail(LlmBaseModel):
    """Details describing a single error condition reported by the API."""

    # Everything except `message` is optional in OpenAI error payloads.
    code: str | None = Field(default=None, description="The error code.")
    message: str = Field(description="The error message.")
    param: str | None = Field(
        default=None, description="The parameter that caused the error."
    )
    type: str | None = Field(default=None, description="The type of error.")
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ErrorResponse(LlmBaseModel):
    """Top-level error envelope returned by the OpenAI API."""

    error: ErrorDetail = Field(description="Container for the error details.")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# ==============================================================================
|
|
47
|
+
# Models Endpoint (/v1/models)
|
|
48
|
+
# ==============================================================================
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class Model(LlmBaseModel):
    """A single model entry as exposed by the models endpoint."""

    id: str = Field(description="The model identifier.")
    created: int = Field(
        description="The Unix timestamp of when the model was created."
    )
    object: Literal["model"] = Field(
        "model", description="The object type, always 'model'."
    )
    owned_by: str = Field(description="The organization that owns the model.")
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class ModelList(LlmBaseModel):
    """Container holding the collection of available models."""

    object: Literal["list"] = Field(
        "list", description="The object type, always 'list'."
    )
    data: list[Model] = Field(description="A list of model objects.")
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
# ==============================================================================
|
|
78
|
+
# Embeddings Endpoint (/v1/embeddings)
|
|
79
|
+
# ==============================================================================
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class EmbeddingRequest(LlmBaseModel):
    """Request payload accepted by the embeddings endpoint."""

    # Input may be one string, a batch of strings, one token sequence,
    # or a batch of token sequences.
    input: str | list[str] | list[int] | list[list[int]] = Field(
        description="Input text to embed, encoded as a string or array of tokens."
    )
    model: str = Field(description="ID of the model to use for embedding.")
    encoding_format: Literal["float", "base64"] | None = Field(
        default="float", description="The format to return the embeddings in."
    )
    dimensions: int | None = Field(
        default=None,
        description="The number of dimensions the resulting output embeddings should have.",
    )
    user: str | None = Field(
        default=None, description="A unique identifier representing your end-user."
    )
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
class EmbeddingData(LlmBaseModel):
    """One embedding vector plus its position in the result list."""

    object: Literal["embedding"] = Field(
        "embedding", description="The object type, always 'embedding'."
    )
    embedding: list[float] = Field(description="The embedding vector.")
    index: int = Field(description="The index of the embedding in the list.")
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
class EmbeddingUsage(LlmBaseModel):
    """Token accounting for a single embeddings request."""

    prompt_tokens: int = Field(description="Number of tokens in the prompt.")
    total_tokens: int = Field(description="Total number of tokens used.")
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
class EmbeddingResponse(LlmBaseModel):
    """Response envelope returned by the embeddings endpoint."""

    object: Literal["list"] = Field(
        "list", description="The object type, always 'list'."
    )
    data: list[EmbeddingData] = Field(description="List of embedding objects.")
    model: str = Field(description="The model used for the embedding.")
    usage: EmbeddingUsage = Field(description="Token usage for the request.")
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
# ==============================================================================
|
|
138
|
+
# Chat Completions Endpoint (/v1/chat/completions)
|
|
139
|
+
# ==============================================================================
|
|
140
|
+
|
|
141
|
+
# --- Request Models ---
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
class ResponseFormat(LlmBaseModel):
    """Describes the output format the model is required to produce."""

    type: Literal["text", "json_object", "json_schema"] = Field(
        "text", description="The type of response format."
    )
    # NOTE(review): presumably only meaningful when type == "json_schema" —
    # confirm against callers.
    json_schema: dict[str, Any] | None = None
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class FunctionDefinition(LlmBaseModel):
    """
    The definition of a function that the model can call.
    """

    name: str = Field(..., description="The name of the function to be called.")
    description: str | None = Field(
        None, description="A description of what the function does."
    )
    # Use default_factory rather than a shared mutable `default={}` literal:
    # each instance gets its own dict, matching Pydantic's recommended idiom
    # for mutable defaults. Observable default value is unchanged.
    parameters: dict[str, Any] = Field(
        default_factory=dict,
        description="The parameters the functions accepts, described as a JSON Schema object.",
    )
|
|
168
|
+
|
|
169
|
+
|
|
170
|
+
class Tool(LlmBaseModel):
    """A tool made available to the model."""

    type: Literal["function"] = Field(
        "function",
        description="The type of the tool, currently only 'function' is supported.",
    )
    # The function schema this tool exposes.
    function: FunctionDefinition
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
class FunctionCall(LlmBaseModel):
    """Name and JSON-encoded argument string of a function invocation."""

    name: str
    # Arguments arrive as a raw JSON string, not a parsed object.
    arguments: str
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
class ToolCall(LlmBaseModel):
    """A single tool invocation emitted by the model."""

    id: str
    type: Literal["function"] = Field("function")
    function: FunctionCall
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
class ChatMessage(LlmBaseModel):
    """One turn of a chat conversation."""

    role: Literal["system", "user", "assistant", "tool", "developer"]
    # Content may be plain text, a list of structured content parts, or None.
    content: str | list[dict[str, Any]] | None
    name: str | None = Field(
        None,
        description="The name of the author of this message. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.",
    )
    tool_calls: list[ToolCall] | None = None
    tool_call_id: str | None = None  # For tool role messages
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
class ChatCompletionRequest(LlmBaseModel):
    """
    Request body for creating a chat completion.

    Field names and ranges follow the OpenAI-style chat-completions schema;
    everything except ``messages`` is optional so partial client payloads
    validate cleanly.
    """

    messages: list[ChatMessage]
    model: str | None = Field(default=None)
    audio: dict[str, Any] | None = None
    frequency_penalty: float | None = Field(default=None, ge=-2.0, le=2.0)
    logit_bias: dict[str, float] | None = Field(default=None)
    logprobs: bool | None = Field(default=None)
    top_logprobs: int | None = Field(default=None, ge=0, le=20)
    # Deprecated upstream in favor of max_completion_tokens; kept for compatibility.
    max_tokens: int | None = Field(default=None, deprecated=True)
    max_completion_tokens: int | None = Field(default=None)
    n: int | None = Field(default=1)
    parallel_tool_calls: bool | None = Field(default=None)
    presence_penalty: float | None = Field(default=None, ge=-2.0, le=2.0)
    reasoning_effort: Literal["minimal", "low", "medium", "high"] | None = Field(
        default=None
    )
    response_format: ResponseFormat | None = Field(default=None)
    seed: int | None = Field(default=None)
    stop: str | list[str] | None = Field(default=None)
    stream: bool | None = Field(default=None)
    stream_options: dict[str, Any] | None = Field(default=None)
    temperature: float | None = Field(default=None, ge=0.0, le=2.0)
    top_p: float | None = Field(default=None, ge=0.0, le=1.0)
    tools: list[Tool] | None = Field(default=None)
    tool_choice: Literal["none", "auto", "required"] | dict[str, Any] | None = Field(
        default=None
    )
    user: str | None = Field(default=None)
    modalities: list[str] | None = Field(default=None)
    prediction: dict[str, Any] | None = Field(default=None)
    prompt_cache_key: str | None = Field(default=None)
    safety_identifier: str | None = Field(default=None)
    service_tier: str | None = Field(default=None)
    store: bool | None = Field(default=None)
    verbosity: str | None = Field(default=None)
    web_search_options: dict[str, Any] | None = Field(default=None)
|
249
|
+
|
|
250
|
+
# --- Response Models (Non-streaming) ---
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
class ResponseMessageReasoning(LlmBaseModel):
    """Reasoning metadata attached to a response message."""

    effort: Literal["minimal", "low", "medium", "high"] | None = None
    summary: Literal["auto", "detailed", "concise"] | None = None


class ResponseMessage(LlmBaseModel):
    """Assistant message returned in a non-streaming chat completion choice."""

    content: str | list[Any] | None = None
    tool_calls: list[ToolCall] | None = None
    role: Literal["assistant"] = Field(default="assistant")
    refusal: str | dict[str, Any] | None = None
    annotations: list[Any] | None = None
    audio: dict[str, Any] | None = None
    reasoning: ResponseMessageReasoning | None = None
class Choice(LlmBaseModel):
    """A single completion choice in a non-streaming response."""

    finish_reason: Literal["stop", "length", "tool_calls", "content_filter"]
    index: int | None = None
    message: ResponseMessage
    logprobs: dict[str, Any] | None = None
+
|
|
275
|
+
class PromptTokensDetails(LlmBaseModel):
    """Breakdown of the prompt-token count."""

    cached_tokens: int = 0
    audio_tokens: int = 0


class CompletionTokensDetails(LlmBaseModel):
    """Breakdown of the completion-token count."""

    reasoning_tokens: int = 0
    audio_tokens: int = 0
    accepted_prediction_tokens: int = 0
    rejected_prediction_tokens: int = 0


class CompletionUsage(LlmBaseModel):
    """Token accounting for a chat completion (prompt + completion = total)."""

    completion_tokens: int
    prompt_tokens: int
    total_tokens: int
    prompt_tokens_details: PromptTokensDetails | None = None
    completion_tokens_details: CompletionTokensDetails | None = None
|
295
|
+
class ChatCompletionResponse(LlmBaseModel):
    """Top-level non-streaming chat-completion response object."""

    id: str
    choices: list[Choice]
    # Unix timestamp (seconds) of creation, per the OpenAI-style schema.
    created: int
    model: str
    system_fingerprint: str | None = None
    object: Literal["chat.completion"] = Field(default="chat.completion")
    usage: CompletionUsage | None = Field(default=None)
    service_tier: str | None = None
+
|
|
306
|
+
# --- Response Models (Streaming) ---
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
class DeltaMessage(LlmBaseModel):
    """Incremental message fragment carried by a streaming chunk."""

    role: Literal["assistant"] | None = None
    content: str | list[Any] | None = None
    tool_calls: list[ToolCall] | None = None
    audio: dict[str, Any] | None = None
    reasoning: ResponseMessageReasoning | None = None


class StreamingChoice(LlmBaseModel):
    """One choice entry inside a streaming chunk."""

    index: int
    delta: DeltaMessage
    # None until the stream for this choice finishes.
    finish_reason: Literal["stop", "length", "tool_calls"] | None = None
    logprobs: dict[str, Any] | None = None
324
|
+
class ChatCompletionChunk(LlmBaseModel):
    """One server-sent chunk of a streaming chat completion."""

    id: str
    object: Literal["chat.completion.chunk"] = Field(default="chat.completion.chunk")
    created: int
    model: str | None = None
    system_fingerprint: str | None = None
    # May be empty, e.g. on the trailing usage-only chunk.
    choices: list[StreamingChoice] = Field(default_factory=list)
    usage: CompletionUsage | None = Field(
        default=None,
        description="Usage stats, present only in the final chunk if requested.",
    )
|
337
|
+
# ==============================================================================
|
|
338
|
+
# Responses Endpoint (/v1/responses)
|
|
339
|
+
# ==============================================================================
|
|
340
|
+
|
|
341
|
+
|
|
342
|
+
# --- Request Models ---
|
|
343
|
+
class StreamOptions(LlmBaseModel):
    """Options controlling streaming behavior for the Responses endpoint."""

    include_usage: bool | None = Field(
        default=None,
        description="If set, an additional chunk will be streamed before the final completion chunk with usage statistics.",
    )


class ToolFunction(LlmBaseModel):
    """Nested function payload of a Responses-API function tool."""

    name: str
    description: str | None = None
    # JSON Schema object describing the accepted arguments.
    parameters: dict[str, Any] = Field(default_factory=dict)
|
356
|
+
class FunctionTool(LlmBaseModel):
    """Function tool accepting both the nested and the flat wire shape.

    Clients may send either ``{"type": "function", "function": {...}}`` or a
    flat ``{"type": "function", "name": ..., ...}``. After validation both
    representations are populated so downstream code can use either.
    """

    type: Literal["function"] = Field(default="function")
    function: ToolFunction | None = None
    name: str | None = None
    description: str | None = None
    parameters: dict[str, Any] | None = None

    @model_validator(mode="after")
    def _normalize(self) -> "FunctionTool":
        nested = self.function
        if nested is None:
            # Flat form: a name is mandatory; synthesize the nested object.
            if self.name is None:
                raise ValueError("Function tool requires a name")
            self.function = ToolFunction(
                name=self.name,
                description=self.description,
                parameters=self.parameters or {},
            )
            return self
        # Nested form: fill any flat fields not supplied by the client.
        self.name = self.name or nested.name
        if self.description is None:
            self.description = nested.description
        if self.parameters is None:
            self.parameters = nested.parameters
        return self
+
|
|
383
|
+
# Valid include values for Responses API
# Checked by ResponseRequest.validate_include; unknown entries are rejected.
VALID_INCLUDE_VALUES = [
    "web_search_call.action.sources",
    "code_interpreter_call.outputs",
    "computer_call_output.output.image_url",
    "file_search_call.results",
    "message.input_image.image_url",
    "message.output_text.logprobs",
    "reasoning.encrypted_content",
]
|
394
|
+
|
|
395
|
+
class InputTextContent(LlmBaseModel):
    """Text content part of a Responses-API input message."""

    type: Literal["input_text"]
    text: str
    annotations: list[Any] | None = None


class InputMessage(LlmBaseModel):
    """A message in the Responses-API ``input`` list."""

    role: Literal["system", "user", "assistant", "tool", "developer"]
    # Plain text, or a mixed list of typed text parts and raw content dicts.
    content: str | list[dict[str, Any] | InputTextContent] | None
|
|
406
|
+
class ResponseRequest(LlmBaseModel):
    """Request body for the Responses endpoint (/v1/responses).

    Only ``input`` is required; all other fields are optional pass-throughs
    of the OpenAI-style schema. ``include`` entries are validated against
    ``VALID_INCLUDE_VALUES``.
    """

    model: str | None = Field(default=None)
    input: str | list[Any]
    background: bool | None = Field(
        default=None, description="Whether to run the model response in the background"
    )
    conversation: str | dict[str, Any] | None = Field(
        default=None, description="The conversation that this response belongs to"
    )
    include: list[str] | None = Field(
        default=None,
        description="Specify additional output data to include in the model response",
    )

    @field_validator("include")
    @classmethod
    def validate_include(cls, v: list[str] | None) -> list[str] | None:
        # Reject unknown include entries early, listing the allowed values
        # in the error so clients can self-correct.
        if v is not None:
            for item in v:
                if item not in VALID_INCLUDE_VALUES:
                    raise ValueError(
                        f"Invalid include value: {item}. Valid values are: {VALID_INCLUDE_VALUES}"
                    )
        return v

    instructions: str | None = Field(default=None)
    max_output_tokens: int | None = Field(default=None)
    max_tool_calls: int | None = Field(default=None)
    metadata: dict[str, str] | None = Field(default=None)
    parallel_tool_calls: bool | None = Field(default=None)
    previous_response_id: str | None = Field(default=None)
    prompt: dict[str, Any] | None = Field(default=None)
    prompt_cache_key: str | None = Field(default=None)
    reasoning: dict[str, Any] | None = Field(default=None)
    safety_identifier: str | None = Field(default=None)
    service_tier: str | None = Field(default=None)
    store: bool | None = Field(default=None)
    stream: bool | None = Field(default=None)
    stream_options: StreamOptions | None = Field(default=None)
    temperature: float | None = Field(default=None, ge=0.0, le=2.0)
    text: dict[str, Any] | None = Field(default=None)
    tools: list[Any] | None = Field(default=None)
    tool_choice: str | dict[str, Any] | None = Field(default=None)
    top_logprobs: int | None = Field(default=None)
    top_p: float | None = Field(default=None, ge=0.0, le=1.0)
    truncation: str | None = Field(default=None)
    user: str | None = Field(default=None)
454
|
+
|
|
455
|
+
# --- Response Models (Non-streaming) ---
|
|
456
|
+
class OutputTextContent(LlmBaseModel):
    """Text content part of a Responses-API output message."""

    type: Literal["output_text"]
    text: str
    annotations: list[Any] | None = None
    logprobs: dict[str, Any] | list[Any] | None = None


class MessageOutput(LlmBaseModel):
    """A ``message`` output item in a Responses-API response."""

    type: Literal["message"]
    id: str
    status: str
    role: Literal["assistant", "user"]
    content: list[OutputTextContent | dict[str, Any]]  # To handle various content types


class ReasoningOutput(LlmBaseModel):
    """A ``reasoning`` output item; status/summary may be omitted upstream."""

    type: Literal["reasoning"]
    id: str
    status: str | None = None
    summary: list[Any] | None = None


class FunctionCallOutput(LlmBaseModel):
    """A ``function_call`` output item produced by the model."""

    type: Literal["function_call"]
    id: str
    status: str | None = None
    name: str | None = None
    call_id: str | None = None
    # Either a raw string or an already-parsed dict, depending on the source.
    arguments: str | dict[str, Any] | None = None
+
|
|
487
|
+
class InputTokensDetails(LlmBaseModel):
    """Breakdown of input-token usage."""

    cached_tokens: int = Field(
        default=0, description="Number of tokens retrieved from cache"
    )


class OutputTokensDetails(LlmBaseModel):
    """Breakdown of output-token usage."""

    reasoning_tokens: int = Field(
        default=0, description="Number of tokens used for reasoning"
    )


class ResponseUsage(LlmBaseModel):
    """Token accounting for a Responses-API response."""

    input_tokens: int = Field(default=0, description="Number of input tokens")
    input_tokens_details: InputTokensDetails = Field(
        default_factory=InputTokensDetails, description="Details about input tokens"
    )
    output_tokens: int = Field(default=0, description="Number of output tokens")
    output_tokens_details: OutputTokensDetails = Field(
        default_factory=OutputTokensDetails, description="Details about output tokens"
    )
    total_tokens: int = Field(default=0, description="Total number of tokens used")
|
+
|
|
511
|
+
class IncompleteDetails(LlmBaseModel):
    """Why a response ended in an ``incomplete`` state."""

    reason: str


class Reasoning(LlmBaseModel):
    """Reasoning configuration echoed on a response object."""

    effort: Any | None = None
    summary: Any | None = None
|
+
|
|
520
|
+
class ResponseObject(LlmBaseModel):
    """Top-level Responses-API response object (non-streaming, and embedded
    inside streaming lifecycle events such as ``response.completed``)."""

    id: str
    object: Literal["response"] = Field(default="response")
    # Unix timestamp (seconds).
    created_at: int
    status: str
    model: str
    # Heterogeneous output list; unknown item shapes fall back to plain dicts.
    output: list[MessageOutput | ReasoningOutput | FunctionCallOutput | dict[str, Any]]
    parallel_tool_calls: bool
    usage: ResponseUsage | None = None
    error: ErrorDetail | None = None
    incomplete_details: IncompleteDetails | None = None
    metadata: dict[str, str] | None = None
    instructions: str | None = None
    max_output_tokens: int | None = None
    previous_response_id: str | None = None
    reasoning: Reasoning | None = None
    store: bool | None = None
    temperature: float | None = None
    text: dict[str, Any] | str | None = None
    tool_choice: str | dict[str, Any] | None = None
    tools: list[Any] | None = None
    top_p: float | None = None
    truncation: str | None = None
    user: str | None = None
|
+
|
|
546
|
+
# --- Response Models (Streaming) ---
|
|
547
|
+
class BaseStreamEvent(LlmBaseModel):
    """Common base for Responses-API streaming events; carries the ordering
    counter shared by all event types."""

    sequence_number: int


class ResponseCreatedEvent(BaseStreamEvent):
    """Emitted once when the response is created."""

    type: Literal["response.created"]
    response: ResponseObject


class ResponseInProgressEvent(BaseStreamEvent):
    """Emitted while the response is being generated."""

    type: Literal["response.in_progress"]
    response: ResponseObject


class ResponseCompletedEvent(BaseStreamEvent):
    """Terminal event: the response finished successfully."""

    type: Literal["response.completed"]
    response: ResponseObject


class ResponseFailedEvent(BaseStreamEvent):
    """Terminal event: the response failed."""

    type: Literal["response.failed"]
    response: ResponseObject


class ResponseIncompleteEvent(BaseStreamEvent):
    """Terminal event: the response ended incomplete (see incomplete_details)."""

    type: Literal["response.incomplete"]
    response: ResponseObject
|
+
|
|
576
|
+
class OutputItem(LlmBaseModel):
    """Normalized representation of a Responses API output item.

    OpenAI currently emits different shapes for text, tool, and reasoning
    items. Some omit fields like ``status`` or ``role`` entirely, while others
    include extra metadata such as ``summary`` or ``call_id``. Keeping these
    attributes optional lets us validate real-world payloads without fighting
    the schema.
    """

    id: str
    # Free-form discriminator (e.g. "message", "function_call") — kept as str
    # rather than a Literal so new item types validate.
    type: str
    status: str | None = None
    role: str | None = None
    content: list[Any] | None = None
    text: str | None = None
    name: str | None = None
    arguments: str | None = None
    call_id: str | None = None
    output_index: int | None = None
    summary: list[Any] | None = None
|
+
|
|
599
|
+
class ResponseOutputItemAddedEvent(BaseStreamEvent):
    """A new output item started at ``output_index``."""

    type: Literal["response.output_item.added"]
    output_index: int
    item: OutputItem


class ResponseOutputItemDoneEvent(BaseStreamEvent):
    """The output item at ``output_index`` is complete."""

    type: Literal["response.output_item.done"]
    output_index: int
    item: OutputItem
|
+
|
|
611
|
+
class ContentPart(LlmBaseModel):
    """A content part within an output item (text, annotations, etc.)."""

    type: str
    text: str | None = None
    annotations: list[Any] | None = None


class ResponseContentPartAddedEvent(BaseStreamEvent):
    """A new content part started within an output item."""

    type: Literal["response.content_part.added"]
    item_id: str
    output_index: int
    content_index: int
    part: ContentPart


class ResponseContentPartDoneEvent(BaseStreamEvent):
    """A content part within an output item is complete."""

    type: Literal["response.content_part.done"]
    item_id: str
    output_index: int
    content_index: int
    part: ContentPart
|
+
|
|
633
|
+
class ResponseOutputTextDeltaEvent(BaseStreamEvent):
    """Incremental text appended to an output-text content part."""

    type: Literal["response.output_text.delta"]
    item_id: str
    output_index: int
    content_index: int
    delta: str
    logprobs: dict[str, Any] | list[Any] | None = None


class ResponseOutputTextDoneEvent(BaseStreamEvent):
    """Final accumulated text for an output-text content part."""

    type: Literal["response.output_text.done"]
    item_id: str
    output_index: int
    content_index: int
    text: str
    logprobs: dict[str, Any] | list[Any] | None = None


class ResponseRefusalDeltaEvent(BaseStreamEvent):
    """Incremental refusal text."""

    type: Literal["response.refusal.delta"]
    item_id: str
    output_index: int
    content_index: int
    delta: str


class ResponseRefusalDoneEvent(BaseStreamEvent):
    """Final refusal text."""

    type: Literal["response.refusal.done"]
    item_id: str
    output_index: int
    content_index: int
    refusal: str


class ResponseFunctionCallArgumentsDeltaEvent(BaseStreamEvent):
    """Incremental fragment of a function call's argument string."""

    type: Literal["response.function_call_arguments.delta"]
    item_id: str
    output_index: int
    delta: str


class ResponseFunctionCallArgumentsDoneEvent(BaseStreamEvent):
    """Complete argument string for a function call."""

    type: Literal["response.function_call_arguments.done"]
    item_id: str
    output_index: int
    arguments: str
|
+
|
|
681
|
+
class ReasoningSummaryPart(LlmBaseModel):
    """A part of a reasoning summary (typed text fragment)."""

    type: str
    text: str


class ReasoningSummaryPartAddedEvent(BaseStreamEvent):
    """A new reasoning-summary part started."""

    type: Literal["response.reasoning_summary_part.added"]
    item_id: str
    output_index: int
    summary_index: int
    part: ReasoningSummaryPart


class ReasoningSummaryPartDoneEvent(BaseStreamEvent):
    """A reasoning-summary part is complete."""

    type: Literal["response.reasoning_summary_part.done"]
    item_id: str
    output_index: int
    summary_index: int
    part: ReasoningSummaryPart


class ReasoningSummaryTextDeltaEvent(BaseStreamEvent):
    """Incremental text of a reasoning summary."""

    type: Literal["response.reasoning_summary_text.delta"]
    item_id: str
    output_index: int
    summary_index: int
    delta: str


class ReasoningSummaryTextDoneEvent(BaseStreamEvent):
    """Final text of a reasoning summary."""

    type: Literal["response.reasoning_summary_text.done"]
    item_id: str
    output_index: int
    summary_index: int
    text: str


class ReasoningTextDeltaEvent(BaseStreamEvent):
    """Incremental reasoning text (indexed by content_index, not summary_index)."""

    type: Literal["response.reasoning_text.delta"]
    item_id: str
    output_index: int
    content_index: int
    delta: str


class ReasoningTextDoneEvent(BaseStreamEvent):
    """Final reasoning text for a content part."""

    type: Literal["response.reasoning_text.done"]
    item_id: str
    output_index: int
    content_index: int
    text: str
|
+
|
|
734
|
+
class FileSearchCallEvent(BaseStreamEvent):
    """Base for file-search tool-call lifecycle events."""

    output_index: int
    item_id: str


class FileSearchCallInProgressEvent(FileSearchCallEvent):
    type: Literal["response.file_search_call.in_progress"]


class FileSearchCallSearchingEvent(FileSearchCallEvent):
    type: Literal["response.file_search_call.searching"]


class FileSearchCallCompletedEvent(FileSearchCallEvent):
    type: Literal["response.file_search_call.completed"]


class WebSearchCallEvent(BaseStreamEvent):
    """Base for web-search tool-call lifecycle events."""

    output_index: int
    item_id: str


class WebSearchCallInProgressEvent(WebSearchCallEvent):
    type: Literal["response.web_search_call.in_progress"]


class WebSearchCallSearchingEvent(WebSearchCallEvent):
    type: Literal["response.web_search_call.searching"]


class WebSearchCallCompletedEvent(WebSearchCallEvent):
    type: Literal["response.web_search_call.completed"]


class CodeInterpreterCallEvent(BaseStreamEvent):
    """Base for code-interpreter tool-call lifecycle events."""

    output_index: int
    item_id: str


class CodeInterpreterCallInProgressEvent(CodeInterpreterCallEvent):
    type: Literal["response.code_interpreter_call.in_progress"]


class CodeInterpreterCallInterpretingEvent(CodeInterpreterCallEvent):
    type: Literal["response.code_interpreter_call.interpreting"]


class CodeInterpreterCallCompletedEvent(CodeInterpreterCallEvent):
    type: Literal["response.code_interpreter_call.completed"]


class CodeInterpreterCallCodeDeltaEvent(CodeInterpreterCallEvent):
    """Incremental fragment of the generated code."""

    type: Literal["response.code_interpreter_call_code.delta"]
    delta: str


class CodeInterpreterCallCodeDoneEvent(CodeInterpreterCallEvent):
    """Complete generated code."""

    type: Literal["response.code_interpreter_call_code.done"]
    code: str
|
+
|
|
795
|
+
class ErrorEvent(LlmBaseModel):  # Does not inherit from BaseStreamEvent per docs
    """Streaming error event (no sequence_number, unlike other events)."""

    type: Literal["error"]
    error: ErrorDetail
|
+
|
|
800
|
+
# Union type for all possible streaming events (for type annotations)
# Pydantic discriminates members via each model's `type` Literal field.
StreamEventType = (
    ResponseCreatedEvent
    | ResponseInProgressEvent
    | ResponseCompletedEvent
    | ResponseFailedEvent
    | ResponseIncompleteEvent
    | ResponseOutputItemAddedEvent
    | ResponseOutputItemDoneEvent
    | ResponseContentPartAddedEvent
    | ResponseContentPartDoneEvent
    | ResponseOutputTextDeltaEvent
    | ResponseOutputTextDoneEvent
    | ResponseRefusalDeltaEvent
    | ResponseRefusalDoneEvent
    | ResponseFunctionCallArgumentsDeltaEvent
    | ResponseFunctionCallArgumentsDoneEvent
    | ReasoningSummaryPartAddedEvent
    | ReasoningSummaryPartDoneEvent
    | ReasoningSummaryTextDeltaEvent
    | ReasoningSummaryTextDoneEvent
    | ReasoningTextDeltaEvent
    | ReasoningTextDoneEvent
    | FileSearchCallInProgressEvent
    | FileSearchCallSearchingEvent
    | FileSearchCallCompletedEvent
    | WebSearchCallInProgressEvent
    | WebSearchCallSearchingEvent
    | WebSearchCallCompletedEvent
    | CodeInterpreterCallInProgressEvent
    | CodeInterpreterCallInterpretingEvent
    | CodeInterpreterCallCompletedEvent
    | CodeInterpreterCallCodeDeltaEvent
    | CodeInterpreterCallCodeDoneEvent
    | ErrorEvent
)

# RootModel wrapper for validation (for pydantic parsing)
AnyStreamEvent = RootModel[StreamEventType]
840
|
+
|
|
841
|
+
# Utility functions
|
|
842
|
+
def generate_responses_id() -> str:
    """Generate an OpenAI-compatible response ID.

    NOTE(review): the prefix is ``chatcmpl-`` although the function name says
    "responses" — presumably the ID format is shared across endpoints; confirm.
    """
    random_part = uuid.uuid4().hex[:29]
    return "chatcmpl-" + random_part