ccproxy-api 0.1.7__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/api/__init__.py +1 -15
- ccproxy/api/app.py +434 -219
- ccproxy/api/bootstrap.py +30 -0
- ccproxy/api/decorators.py +85 -0
- ccproxy/api/dependencies.py +144 -168
- ccproxy/api/format_validation.py +54 -0
- ccproxy/api/middleware/cors.py +6 -3
- ccproxy/api/middleware/errors.py +388 -524
- ccproxy/api/middleware/hooks.py +563 -0
- ccproxy/api/middleware/normalize_headers.py +59 -0
- ccproxy/api/middleware/request_id.py +35 -16
- ccproxy/api/middleware/streaming_hooks.py +292 -0
- ccproxy/api/routes/__init__.py +5 -14
- ccproxy/api/routes/health.py +39 -672
- ccproxy/api/routes/plugins.py +277 -0
- ccproxy/auth/__init__.py +2 -19
- ccproxy/auth/bearer.py +25 -15
- ccproxy/auth/dependencies.py +123 -157
- ccproxy/auth/exceptions.py +0 -12
- ccproxy/auth/manager.py +35 -49
- ccproxy/auth/managers/__init__.py +10 -0
- ccproxy/auth/managers/base.py +523 -0
- ccproxy/auth/managers/base_enhanced.py +63 -0
- ccproxy/auth/managers/token_snapshot.py +77 -0
- ccproxy/auth/models/base.py +65 -0
- ccproxy/auth/models/credentials.py +40 -0
- ccproxy/auth/oauth/__init__.py +4 -18
- ccproxy/auth/oauth/base.py +533 -0
- ccproxy/auth/oauth/cli_errors.py +37 -0
- ccproxy/auth/oauth/flows.py +430 -0
- ccproxy/auth/oauth/protocol.py +366 -0
- ccproxy/auth/oauth/registry.py +408 -0
- ccproxy/auth/oauth/router.py +396 -0
- ccproxy/auth/oauth/routes.py +186 -113
- ccproxy/auth/oauth/session.py +151 -0
- ccproxy/auth/oauth/templates.py +342 -0
- ccproxy/auth/storage/__init__.py +2 -5
- ccproxy/auth/storage/base.py +279 -5
- ccproxy/auth/storage/generic.py +134 -0
- ccproxy/cli/__init__.py +1 -2
- ccproxy/cli/_settings_help.py +351 -0
- ccproxy/cli/commands/auth.py +1519 -793
- ccproxy/cli/commands/config/commands.py +209 -276
- ccproxy/cli/commands/plugins.py +669 -0
- ccproxy/cli/commands/serve.py +75 -810
- ccproxy/cli/commands/status.py +254 -0
- ccproxy/cli/decorators.py +83 -0
- ccproxy/cli/helpers.py +22 -60
- ccproxy/cli/main.py +359 -10
- ccproxy/cli/options/claude_options.py +0 -25
- ccproxy/config/__init__.py +7 -11
- ccproxy/config/core.py +227 -0
- ccproxy/config/env_generator.py +232 -0
- ccproxy/config/runtime.py +67 -0
- ccproxy/config/security.py +36 -3
- ccproxy/config/settings.py +382 -441
- ccproxy/config/toml_generator.py +299 -0
- ccproxy/config/utils.py +452 -0
- ccproxy/core/__init__.py +7 -271
- ccproxy/{_version.py → core/_version.py} +16 -3
- ccproxy/core/async_task_manager.py +516 -0
- ccproxy/core/async_utils.py +47 -14
- ccproxy/core/auth/__init__.py +6 -0
- ccproxy/core/constants.py +16 -50
- ccproxy/core/errors.py +53 -0
- ccproxy/core/id_utils.py +20 -0
- ccproxy/core/interfaces.py +16 -123
- ccproxy/core/logging.py +473 -18
- ccproxy/core/plugins/__init__.py +77 -0
- ccproxy/core/plugins/cli_discovery.py +211 -0
- ccproxy/core/plugins/declaration.py +455 -0
- ccproxy/core/plugins/discovery.py +604 -0
- ccproxy/core/plugins/factories.py +967 -0
- ccproxy/core/plugins/hooks/__init__.py +30 -0
- ccproxy/core/plugins/hooks/base.py +58 -0
- ccproxy/core/plugins/hooks/events.py +46 -0
- ccproxy/core/plugins/hooks/implementations/__init__.py +16 -0
- ccproxy/core/plugins/hooks/implementations/formatters/__init__.py +11 -0
- ccproxy/core/plugins/hooks/implementations/formatters/json.py +552 -0
- ccproxy/core/plugins/hooks/implementations/formatters/raw.py +370 -0
- ccproxy/core/plugins/hooks/implementations/http_tracer.py +431 -0
- ccproxy/core/plugins/hooks/layers.py +44 -0
- ccproxy/core/plugins/hooks/manager.py +186 -0
- ccproxy/core/plugins/hooks/registry.py +139 -0
- ccproxy/core/plugins/hooks/thread_manager.py +203 -0
- ccproxy/core/plugins/hooks/types.py +22 -0
- ccproxy/core/plugins/interfaces.py +416 -0
- ccproxy/core/plugins/loader.py +166 -0
- ccproxy/core/plugins/middleware.py +233 -0
- ccproxy/core/plugins/models.py +59 -0
- ccproxy/core/plugins/protocol.py +180 -0
- ccproxy/core/plugins/runtime.py +519 -0
- ccproxy/{observability/context.py → core/request_context.py} +137 -94
- ccproxy/core/status_report.py +211 -0
- ccproxy/core/transformers.py +13 -8
- ccproxy/data/claude_headers_fallback.json +540 -19
- ccproxy/data/codex_headers_fallback.json +114 -7
- ccproxy/http/__init__.py +30 -0
- ccproxy/http/base.py +95 -0
- ccproxy/http/client.py +323 -0
- ccproxy/http/hooks.py +642 -0
- ccproxy/http/pool.py +279 -0
- ccproxy/llms/formatters/__init__.py +7 -0
- ccproxy/llms/formatters/anthropic_to_openai/__init__.py +55 -0
- ccproxy/llms/formatters/anthropic_to_openai/errors.py +65 -0
- ccproxy/llms/formatters/anthropic_to_openai/requests.py +356 -0
- ccproxy/llms/formatters/anthropic_to_openai/responses.py +153 -0
- ccproxy/llms/formatters/anthropic_to_openai/streams.py +1546 -0
- ccproxy/llms/formatters/base.py +140 -0
- ccproxy/llms/formatters/base_model.py +33 -0
- ccproxy/llms/formatters/common/__init__.py +51 -0
- ccproxy/llms/formatters/common/identifiers.py +48 -0
- ccproxy/llms/formatters/common/streams.py +254 -0
- ccproxy/llms/formatters/common/thinking.py +74 -0
- ccproxy/llms/formatters/common/usage.py +135 -0
- ccproxy/llms/formatters/constants.py +55 -0
- ccproxy/llms/formatters/context.py +116 -0
- ccproxy/llms/formatters/mapping.py +33 -0
- ccproxy/llms/formatters/openai_to_anthropic/__init__.py +55 -0
- ccproxy/llms/formatters/openai_to_anthropic/_helpers.py +141 -0
- ccproxy/llms/formatters/openai_to_anthropic/errors.py +53 -0
- ccproxy/llms/formatters/openai_to_anthropic/requests.py +674 -0
- ccproxy/llms/formatters/openai_to_anthropic/responses.py +285 -0
- ccproxy/llms/formatters/openai_to_anthropic/streams.py +530 -0
- ccproxy/llms/formatters/openai_to_openai/__init__.py +53 -0
- ccproxy/llms/formatters/openai_to_openai/_helpers.py +325 -0
- ccproxy/llms/formatters/openai_to_openai/errors.py +6 -0
- ccproxy/llms/formatters/openai_to_openai/requests.py +388 -0
- ccproxy/llms/formatters/openai_to_openai/responses.py +594 -0
- ccproxy/llms/formatters/openai_to_openai/streams.py +1832 -0
- ccproxy/llms/formatters/utils.py +306 -0
- ccproxy/llms/models/__init__.py +9 -0
- ccproxy/llms/models/anthropic.py +619 -0
- ccproxy/llms/models/openai.py +844 -0
- ccproxy/llms/streaming/__init__.py +26 -0
- ccproxy/llms/streaming/accumulators.py +1074 -0
- ccproxy/llms/streaming/formatters.py +251 -0
- ccproxy/{adapters/openai/streaming.py → llms/streaming/processors.py} +193 -240
- ccproxy/models/__init__.py +8 -159
- ccproxy/models/detection.py +92 -193
- ccproxy/models/provider.py +75 -0
- ccproxy/plugins/access_log/README.md +32 -0
- ccproxy/plugins/access_log/__init__.py +20 -0
- ccproxy/plugins/access_log/config.py +33 -0
- ccproxy/plugins/access_log/formatter.py +126 -0
- ccproxy/plugins/access_log/hook.py +763 -0
- ccproxy/plugins/access_log/logger.py +254 -0
- ccproxy/plugins/access_log/plugin.py +137 -0
- ccproxy/plugins/access_log/writer.py +109 -0
- ccproxy/plugins/analytics/README.md +24 -0
- ccproxy/plugins/analytics/__init__.py +1 -0
- ccproxy/plugins/analytics/config.py +5 -0
- ccproxy/plugins/analytics/ingest.py +85 -0
- ccproxy/plugins/analytics/models.py +97 -0
- ccproxy/plugins/analytics/plugin.py +121 -0
- ccproxy/plugins/analytics/routes.py +163 -0
- ccproxy/plugins/analytics/service.py +284 -0
- ccproxy/plugins/claude_api/README.md +29 -0
- ccproxy/plugins/claude_api/__init__.py +10 -0
- ccproxy/plugins/claude_api/adapter.py +829 -0
- ccproxy/plugins/claude_api/config.py +52 -0
- ccproxy/plugins/claude_api/detection_service.py +461 -0
- ccproxy/plugins/claude_api/health.py +175 -0
- ccproxy/plugins/claude_api/hooks.py +284 -0
- ccproxy/plugins/claude_api/models.py +256 -0
- ccproxy/plugins/claude_api/plugin.py +298 -0
- ccproxy/plugins/claude_api/routes.py +118 -0
- ccproxy/plugins/claude_api/streaming_metrics.py +68 -0
- ccproxy/plugins/claude_api/tasks.py +84 -0
- ccproxy/plugins/claude_sdk/README.md +35 -0
- ccproxy/plugins/claude_sdk/__init__.py +80 -0
- ccproxy/plugins/claude_sdk/adapter.py +749 -0
- ccproxy/plugins/claude_sdk/auth.py +57 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/client.py +63 -39
- ccproxy/plugins/claude_sdk/config.py +210 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/converter.py +6 -6
- ccproxy/plugins/claude_sdk/detection_service.py +163 -0
- ccproxy/{services/claude_sdk_service.py → plugins/claude_sdk/handler.py} +123 -304
- ccproxy/plugins/claude_sdk/health.py +113 -0
- ccproxy/plugins/claude_sdk/hooks.py +115 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/manager.py +42 -32
- ccproxy/{claude_sdk → plugins/claude_sdk}/message_queue.py +8 -8
- ccproxy/{models/claude_sdk.py → plugins/claude_sdk/models.py} +64 -16
- ccproxy/plugins/claude_sdk/options.py +154 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/parser.py +23 -5
- ccproxy/plugins/claude_sdk/plugin.py +269 -0
- ccproxy/plugins/claude_sdk/routes.py +104 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/session_client.py +124 -12
- ccproxy/plugins/claude_sdk/session_pool.py +700 -0
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_handle.py +48 -43
- ccproxy/{claude_sdk → plugins/claude_sdk}/stream_worker.py +22 -18
- ccproxy/{claude_sdk → plugins/claude_sdk}/streaming.py +50 -16
- ccproxy/plugins/claude_sdk/tasks.py +97 -0
- ccproxy/plugins/claude_shared/README.md +18 -0
- ccproxy/plugins/claude_shared/__init__.py +12 -0
- ccproxy/plugins/claude_shared/model_defaults.py +171 -0
- ccproxy/plugins/codex/README.md +35 -0
- ccproxy/plugins/codex/__init__.py +6 -0
- ccproxy/plugins/codex/adapter.py +635 -0
- ccproxy/{config/codex.py → plugins/codex/config.py} +78 -12
- ccproxy/plugins/codex/detection_service.py +544 -0
- ccproxy/plugins/codex/health.py +162 -0
- ccproxy/plugins/codex/hooks.py +263 -0
- ccproxy/plugins/codex/model_defaults.py +39 -0
- ccproxy/plugins/codex/models.py +263 -0
- ccproxy/plugins/codex/plugin.py +275 -0
- ccproxy/plugins/codex/routes.py +129 -0
- ccproxy/plugins/codex/streaming_metrics.py +324 -0
- ccproxy/plugins/codex/tasks.py +106 -0
- ccproxy/plugins/codex/utils/__init__.py +1 -0
- ccproxy/plugins/codex/utils/sse_parser.py +106 -0
- ccproxy/plugins/command_replay/README.md +34 -0
- ccproxy/plugins/command_replay/__init__.py +17 -0
- ccproxy/plugins/command_replay/config.py +133 -0
- ccproxy/plugins/command_replay/formatter.py +432 -0
- ccproxy/plugins/command_replay/hook.py +294 -0
- ccproxy/plugins/command_replay/plugin.py +161 -0
- ccproxy/plugins/copilot/README.md +39 -0
- ccproxy/plugins/copilot/__init__.py +11 -0
- ccproxy/plugins/copilot/adapter.py +465 -0
- ccproxy/plugins/copilot/config.py +155 -0
- ccproxy/plugins/copilot/data/copilot_fallback.json +41 -0
- ccproxy/plugins/copilot/detection_service.py +255 -0
- ccproxy/plugins/copilot/manager.py +275 -0
- ccproxy/plugins/copilot/model_defaults.py +284 -0
- ccproxy/plugins/copilot/models.py +148 -0
- ccproxy/plugins/copilot/oauth/__init__.py +16 -0
- ccproxy/plugins/copilot/oauth/client.py +494 -0
- ccproxy/plugins/copilot/oauth/models.py +385 -0
- ccproxy/plugins/copilot/oauth/provider.py +602 -0
- ccproxy/plugins/copilot/oauth/storage.py +170 -0
- ccproxy/plugins/copilot/plugin.py +360 -0
- ccproxy/plugins/copilot/routes.py +294 -0
- ccproxy/plugins/credential_balancer/README.md +124 -0
- ccproxy/plugins/credential_balancer/__init__.py +6 -0
- ccproxy/plugins/credential_balancer/config.py +270 -0
- ccproxy/plugins/credential_balancer/factory.py +415 -0
- ccproxy/plugins/credential_balancer/hook.py +51 -0
- ccproxy/plugins/credential_balancer/manager.py +587 -0
- ccproxy/plugins/credential_balancer/plugin.py +146 -0
- ccproxy/plugins/dashboard/README.md +25 -0
- ccproxy/plugins/dashboard/__init__.py +1 -0
- ccproxy/plugins/dashboard/config.py +8 -0
- ccproxy/plugins/dashboard/plugin.py +71 -0
- ccproxy/plugins/dashboard/routes.py +67 -0
- ccproxy/plugins/docker/README.md +32 -0
- ccproxy/{docker → plugins/docker}/__init__.py +3 -0
- ccproxy/{docker → plugins/docker}/adapter.py +108 -10
- ccproxy/plugins/docker/config.py +82 -0
- ccproxy/{docker → plugins/docker}/docker_path.py +4 -3
- ccproxy/{docker → plugins/docker}/middleware.py +2 -2
- ccproxy/plugins/docker/plugin.py +198 -0
- ccproxy/{docker → plugins/docker}/stream_process.py +3 -3
- ccproxy/plugins/duckdb_storage/README.md +26 -0
- ccproxy/plugins/duckdb_storage/__init__.py +1 -0
- ccproxy/plugins/duckdb_storage/config.py +22 -0
- ccproxy/plugins/duckdb_storage/plugin.py +128 -0
- ccproxy/plugins/duckdb_storage/routes.py +51 -0
- ccproxy/plugins/duckdb_storage/storage.py +633 -0
- ccproxy/plugins/max_tokens/README.md +38 -0
- ccproxy/plugins/max_tokens/__init__.py +12 -0
- ccproxy/plugins/max_tokens/adapter.py +235 -0
- ccproxy/plugins/max_tokens/config.py +86 -0
- ccproxy/plugins/max_tokens/models.py +53 -0
- ccproxy/plugins/max_tokens/plugin.py +200 -0
- ccproxy/plugins/max_tokens/service.py +271 -0
- ccproxy/plugins/max_tokens/token_limits.json +54 -0
- ccproxy/plugins/metrics/README.md +35 -0
- ccproxy/plugins/metrics/__init__.py +10 -0
- ccproxy/{observability/metrics.py → plugins/metrics/collector.py} +20 -153
- ccproxy/plugins/metrics/config.py +85 -0
- ccproxy/plugins/metrics/grafana/dashboards/ccproxy-dashboard.json +1720 -0
- ccproxy/plugins/metrics/hook.py +403 -0
- ccproxy/plugins/metrics/plugin.py +268 -0
- ccproxy/{observability → plugins/metrics}/pushgateway.py +57 -59
- ccproxy/plugins/metrics/routes.py +107 -0
- ccproxy/plugins/metrics/tasks.py +117 -0
- ccproxy/plugins/oauth_claude/README.md +35 -0
- ccproxy/plugins/oauth_claude/__init__.py +14 -0
- ccproxy/plugins/oauth_claude/client.py +270 -0
- ccproxy/plugins/oauth_claude/config.py +84 -0
- ccproxy/plugins/oauth_claude/manager.py +482 -0
- ccproxy/plugins/oauth_claude/models.py +266 -0
- ccproxy/plugins/oauth_claude/plugin.py +149 -0
- ccproxy/plugins/oauth_claude/provider.py +571 -0
- ccproxy/plugins/oauth_claude/storage.py +212 -0
- ccproxy/plugins/oauth_codex/README.md +38 -0
- ccproxy/plugins/oauth_codex/__init__.py +14 -0
- ccproxy/plugins/oauth_codex/client.py +224 -0
- ccproxy/plugins/oauth_codex/config.py +95 -0
- ccproxy/plugins/oauth_codex/manager.py +256 -0
- ccproxy/plugins/oauth_codex/models.py +239 -0
- ccproxy/plugins/oauth_codex/plugin.py +146 -0
- ccproxy/plugins/oauth_codex/provider.py +574 -0
- ccproxy/plugins/oauth_codex/storage.py +92 -0
- ccproxy/plugins/permissions/README.md +28 -0
- ccproxy/plugins/permissions/__init__.py +22 -0
- ccproxy/plugins/permissions/config.py +28 -0
- ccproxy/{cli/commands/permission_handler.py → plugins/permissions/handlers/cli.py} +49 -25
- ccproxy/plugins/permissions/handlers/protocol.py +33 -0
- ccproxy/plugins/permissions/handlers/terminal.py +675 -0
- ccproxy/{api/routes → plugins/permissions}/mcp.py +34 -7
- ccproxy/{models/permissions.py → plugins/permissions/models.py} +65 -1
- ccproxy/plugins/permissions/plugin.py +153 -0
- ccproxy/{api/routes/permissions.py → plugins/permissions/routes.py} +20 -16
- ccproxy/{api/services/permission_service.py → plugins/permissions/service.py} +65 -11
- ccproxy/{api → plugins/permissions}/ui/permission_handler_protocol.py +1 -1
- ccproxy/{api → plugins/permissions}/ui/terminal_permission_handler.py +66 -10
- ccproxy/plugins/pricing/README.md +34 -0
- ccproxy/plugins/pricing/__init__.py +6 -0
- ccproxy/{pricing → plugins/pricing}/cache.py +7 -6
- ccproxy/{config/pricing.py → plugins/pricing/config.py} +32 -6
- ccproxy/plugins/pricing/exceptions.py +35 -0
- ccproxy/plugins/pricing/loader.py +440 -0
- ccproxy/{pricing → plugins/pricing}/models.py +13 -23
- ccproxy/plugins/pricing/plugin.py +169 -0
- ccproxy/plugins/pricing/service.py +191 -0
- ccproxy/plugins/pricing/tasks.py +300 -0
- ccproxy/{pricing → plugins/pricing}/updater.py +86 -72
- ccproxy/plugins/pricing/utils.py +99 -0
- ccproxy/plugins/request_tracer/README.md +40 -0
- ccproxy/plugins/request_tracer/__init__.py +7 -0
- ccproxy/plugins/request_tracer/config.py +120 -0
- ccproxy/plugins/request_tracer/hook.py +415 -0
- ccproxy/plugins/request_tracer/plugin.py +255 -0
- ccproxy/scheduler/__init__.py +2 -14
- ccproxy/scheduler/core.py +26 -41
- ccproxy/scheduler/manager.py +61 -105
- ccproxy/scheduler/registry.py +6 -32
- ccproxy/scheduler/tasks.py +268 -276
- ccproxy/services/__init__.py +0 -1
- ccproxy/services/adapters/__init__.py +11 -0
- ccproxy/services/adapters/base.py +123 -0
- ccproxy/services/adapters/chain_composer.py +88 -0
- ccproxy/services/adapters/chain_validation.py +44 -0
- ccproxy/services/adapters/chat_accumulator.py +200 -0
- ccproxy/services/adapters/delta_utils.py +142 -0
- ccproxy/services/adapters/format_adapter.py +136 -0
- ccproxy/services/adapters/format_context.py +11 -0
- ccproxy/services/adapters/format_registry.py +158 -0
- ccproxy/services/adapters/http_adapter.py +1045 -0
- ccproxy/services/adapters/mock_adapter.py +118 -0
- ccproxy/services/adapters/protocols.py +35 -0
- ccproxy/services/adapters/simple_converters.py +571 -0
- ccproxy/services/auth_registry.py +180 -0
- ccproxy/services/cache/__init__.py +6 -0
- ccproxy/services/cache/response_cache.py +261 -0
- ccproxy/services/cli_detection.py +437 -0
- ccproxy/services/config/__init__.py +6 -0
- ccproxy/services/config/proxy_configuration.py +111 -0
- ccproxy/services/container.py +256 -0
- ccproxy/services/factories.py +380 -0
- ccproxy/services/handler_config.py +76 -0
- ccproxy/services/interfaces.py +298 -0
- ccproxy/services/mocking/__init__.py +6 -0
- ccproxy/services/mocking/mock_handler.py +291 -0
- ccproxy/services/tracing/__init__.py +7 -0
- ccproxy/services/tracing/interfaces.py +61 -0
- ccproxy/services/tracing/null_tracer.py +57 -0
- ccproxy/streaming/__init__.py +23 -0
- ccproxy/streaming/buffer.py +1056 -0
- ccproxy/streaming/deferred.py +897 -0
- ccproxy/streaming/handler.py +117 -0
- ccproxy/streaming/interfaces.py +77 -0
- ccproxy/streaming/simple_adapter.py +39 -0
- ccproxy/streaming/sse.py +109 -0
- ccproxy/streaming/sse_parser.py +127 -0
- ccproxy/templates/__init__.py +6 -0
- ccproxy/templates/plugin_scaffold.py +695 -0
- ccproxy/testing/endpoints/__init__.py +33 -0
- ccproxy/testing/endpoints/cli.py +215 -0
- ccproxy/testing/endpoints/config.py +874 -0
- ccproxy/testing/endpoints/console.py +57 -0
- ccproxy/testing/endpoints/models.py +100 -0
- ccproxy/testing/endpoints/runner.py +1903 -0
- ccproxy/testing/endpoints/tools.py +308 -0
- ccproxy/testing/mock_responses.py +70 -1
- ccproxy/testing/response_handlers.py +20 -0
- ccproxy/utils/__init__.py +0 -6
- ccproxy/utils/binary_resolver.py +476 -0
- ccproxy/utils/caching.py +327 -0
- ccproxy/utils/cli_logging.py +101 -0
- ccproxy/utils/command_line.py +251 -0
- ccproxy/utils/headers.py +228 -0
- ccproxy/utils/model_mapper.py +120 -0
- ccproxy/utils/startup_helpers.py +68 -446
- ccproxy/utils/version_checker.py +273 -6
- ccproxy_api-0.2.0.dist-info/METADATA +212 -0
- ccproxy_api-0.2.0.dist-info/RECORD +417 -0
- {ccproxy_api-0.1.7.dist-info → ccproxy_api-0.2.0.dist-info}/WHEEL +1 -1
- ccproxy_api-0.2.0.dist-info/entry_points.txt +24 -0
- ccproxy/__init__.py +0 -4
- ccproxy/adapters/__init__.py +0 -11
- ccproxy/adapters/base.py +0 -80
- ccproxy/adapters/codex/__init__.py +0 -11
- ccproxy/adapters/openai/__init__.py +0 -42
- ccproxy/adapters/openai/adapter.py +0 -953
- ccproxy/adapters/openai/models.py +0 -412
- ccproxy/adapters/openai/response_adapter.py +0 -355
- ccproxy/adapters/openai/response_models.py +0 -178
- ccproxy/api/middleware/headers.py +0 -49
- ccproxy/api/middleware/logging.py +0 -180
- ccproxy/api/middleware/request_content_logging.py +0 -297
- ccproxy/api/middleware/server_header.py +0 -58
- ccproxy/api/responses.py +0 -89
- ccproxy/api/routes/claude.py +0 -371
- ccproxy/api/routes/codex.py +0 -1251
- ccproxy/api/routes/metrics.py +0 -1029
- ccproxy/api/routes/proxy.py +0 -211
- ccproxy/api/services/__init__.py +0 -6
- ccproxy/auth/conditional.py +0 -84
- ccproxy/auth/credentials_adapter.py +0 -93
- ccproxy/auth/models.py +0 -118
- ccproxy/auth/oauth/models.py +0 -48
- ccproxy/auth/openai/__init__.py +0 -13
- ccproxy/auth/openai/credentials.py +0 -166
- ccproxy/auth/openai/oauth_client.py +0 -334
- ccproxy/auth/openai/storage.py +0 -184
- ccproxy/auth/storage/json_file.py +0 -158
- ccproxy/auth/storage/keyring.py +0 -189
- ccproxy/claude_sdk/__init__.py +0 -18
- ccproxy/claude_sdk/options.py +0 -194
- ccproxy/claude_sdk/session_pool.py +0 -550
- ccproxy/cli/docker/__init__.py +0 -34
- ccproxy/cli/docker/adapter_factory.py +0 -157
- ccproxy/cli/docker/params.py +0 -274
- ccproxy/config/auth.py +0 -153
- ccproxy/config/claude.py +0 -348
- ccproxy/config/cors.py +0 -79
- ccproxy/config/discovery.py +0 -95
- ccproxy/config/docker_settings.py +0 -264
- ccproxy/config/observability.py +0 -158
- ccproxy/config/reverse_proxy.py +0 -31
- ccproxy/config/scheduler.py +0 -108
- ccproxy/config/server.py +0 -86
- ccproxy/config/validators.py +0 -231
- ccproxy/core/codex_transformers.py +0 -389
- ccproxy/core/http.py +0 -328
- ccproxy/core/http_transformers.py +0 -812
- ccproxy/core/proxy.py +0 -143
- ccproxy/core/validators.py +0 -288
- ccproxy/models/errors.py +0 -42
- ccproxy/models/messages.py +0 -269
- ccproxy/models/requests.py +0 -107
- ccproxy/models/responses.py +0 -270
- ccproxy/models/types.py +0 -102
- ccproxy/observability/__init__.py +0 -51
- ccproxy/observability/access_logger.py +0 -457
- ccproxy/observability/sse_events.py +0 -303
- ccproxy/observability/stats_printer.py +0 -753
- ccproxy/observability/storage/__init__.py +0 -1
- ccproxy/observability/storage/duckdb_simple.py +0 -677
- ccproxy/observability/storage/models.py +0 -70
- ccproxy/observability/streaming_response.py +0 -107
- ccproxy/pricing/__init__.py +0 -19
- ccproxy/pricing/loader.py +0 -251
- ccproxy/services/claude_detection_service.py +0 -243
- ccproxy/services/codex_detection_service.py +0 -252
- ccproxy/services/credentials/__init__.py +0 -55
- ccproxy/services/credentials/config.py +0 -105
- ccproxy/services/credentials/manager.py +0 -561
- ccproxy/services/credentials/oauth_client.py +0 -481
- ccproxy/services/proxy_service.py +0 -1827
- ccproxy/static/.keep +0 -0
- ccproxy/utils/cost_calculator.py +0 -210
- ccproxy/utils/disconnection_monitor.py +0 -83
- ccproxy/utils/model_mapping.py +0 -199
- ccproxy/utils/models_provider.py +0 -150
- ccproxy/utils/simple_request_logger.py +0 -284
- ccproxy/utils/streaming_metrics.py +0 -199
- ccproxy_api-0.1.7.dist-info/METADATA +0 -615
- ccproxy_api-0.1.7.dist-info/RECORD +0 -191
- ccproxy_api-0.1.7.dist-info/entry_points.txt +0 -4
- /ccproxy/{api/middleware/auth.py → auth/models/__init__.py} +0 -0
- /ccproxy/{claude_sdk → plugins/claude_sdk}/exceptions.py +0 -0
- /ccproxy/{docker → plugins/docker}/models.py +0 -0
- /ccproxy/{docker → plugins/docker}/protocol.py +0 -0
- /ccproxy/{docker → plugins/docker}/validators.py +0 -0
- /ccproxy/{auth/oauth/storage.py → plugins/permissions/handlers/__init__.py} +0 -0
- /ccproxy/{api → plugins/permissions}/ui/__init__.py +0 -0
- {ccproxy_api-0.1.7.dist-info → ccproxy_api-0.2.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,1074 @@
|
|
|
1
|
+
"""Stream accumulators for different LLM streaming formats.
|
|
2
|
+
|
|
3
|
+
These accumulators process streaming response chunks and rebuild complete response objects
|
|
4
|
+
with all elements like content blocks, tool calls, thinking/reasoning, etc.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
import structlog
|
|
13
|
+
from pydantic import TypeAdapter, ValidationError
|
|
14
|
+
|
|
15
|
+
from ccproxy.llms.models import openai as openai_models
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
logger = structlog.get_logger(__name__)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
_RESPONSES_STREAM_EVENT_ADAPTER = TypeAdapter(openai_models.AnyStreamEvent)
|
|
22
|
+
_RESPONSE_OBJECT_ADAPTER = TypeAdapter(openai_models.ResponseObject)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class StreamAccumulator:
    """Abstract base for rebuilding a complete response from stream chunks.

    Subclasses consume provider-specific streaming events via ``accumulate``
    and expose the rebuilt artifacts through ``get_complete_tool_calls`` and
    ``rebuild_response_object``.
    """

    def __init__(self) -> None:
        # Tool-call state accumulated across events, keyed by block id.
        self.tools: dict[str, dict[str, Any]] = {}
        # Finalized content blocks, in the order they were completed.
        self.content_blocks: list[dict[str, Any]] = []
        # Key of the block currently receiving deltas, if any.
        self.current_content_block: str | None = None
        # Concatenation of every text delta seen so far.
        self.text_content: str = ""

    def accumulate(self, event_name: str, event_data: dict[str, Any]) -> None:
        """Consume a single streaming event.

        Args:
            event_name: Name of the event (e.g. ``content_block_start``).
            event_data: Payload associated with the event.
        """
        raise NotImplementedError

    def get_complete_tool_calls(self) -> list[dict[str, Any]]:
        """Return the tool calls that have been fully accumulated.

        Returns:
            List of complete tool calls.
        """
        raise NotImplementedError

    def rebuild_response_object(self, response: dict[str, Any]) -> dict[str, Any]:
        """Return *response* augmented with all accumulated content.

        Subclass implementations merge accumulated content blocks, tool
        calls, thinking/reasoning, etc. back into the response object.

        Args:
            response: The original response object.

        Returns:
            The updated response carrying all accumulated content.
        """
        raise NotImplementedError
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class ClaudeAccumulator(StreamAccumulator):
|
|
67
|
+
"""Accumulate Anthropic/Claude streaming events."""
|
|
68
|
+
|
|
69
|
+
def __init__(self) -> None:
    """Initialize empty accumulation state for a Claude message stream."""
    super().__init__()
    # Maps the stream's block index (stringified) to our internal block key.
    self._index_to_key: dict[str, str] = {}
    self.content_blocks: list[dict[str, Any]] = []
    # Maps a block key to its evolving content-block dict.
    self.content_block_map: dict[str, dict[str, Any]] = {}
    # Message-level fields captured from message_start events.
    self.message_metadata: dict[str, Any] = {
        "id": None,
        "type": "message",
        "role": "assistant",
        "model": None,
    }
    # Running usage counters merged from message_start/message_delta events.
    self._usage: dict[str, int] = {}
    # Final stop reason reported by message_delta, if any.
    self.stop_reason: str | None = None
|
83
|
+
def accumulate(self, event_name: str, event_data: dict[str, Any]) -> None:
|
|
84
|
+
"""Accumulate Claude streaming events.
|
|
85
|
+
|
|
86
|
+
Processes Claude-specific event types like:
|
|
87
|
+
- content_block_start
|
|
88
|
+
- content_block_delta
|
|
89
|
+
- content_block_stop
|
|
90
|
+
|
|
91
|
+
Args:
|
|
92
|
+
event_name: Name of the event
|
|
93
|
+
event_data: Data associated with the event
|
|
94
|
+
"""
|
|
95
|
+
if event_name == "message_start":
|
|
96
|
+
if (
|
|
97
|
+
isinstance(event_data, dict)
|
|
98
|
+
and event_data.get("type") == "message_start"
|
|
99
|
+
):
|
|
100
|
+
message = event_data.get("message", {})
|
|
101
|
+
if isinstance(message, dict):
|
|
102
|
+
self.message_metadata["id"] = (
|
|
103
|
+
message.get("id") or self.message_metadata["id"]
|
|
104
|
+
)
|
|
105
|
+
self.message_metadata["type"] = message.get("type", "message")
|
|
106
|
+
self.message_metadata["role"] = message.get("role", "assistant")
|
|
107
|
+
self.message_metadata["model"] = (
|
|
108
|
+
message.get("model") or self.message_metadata["model"]
|
|
109
|
+
)
|
|
110
|
+
|
|
111
|
+
usage = message.get("usage")
|
|
112
|
+
if isinstance(usage, dict):
|
|
113
|
+
self._merge_usage(usage)
|
|
114
|
+
|
|
115
|
+
elif event_name == "message_delta":
|
|
116
|
+
if (
|
|
117
|
+
isinstance(event_data, dict)
|
|
118
|
+
and event_data.get("type") == "message_delta"
|
|
119
|
+
):
|
|
120
|
+
delta = event_data.get("delta")
|
|
121
|
+
if isinstance(delta, dict):
|
|
122
|
+
stop_reason = delta.get("stop_reason")
|
|
123
|
+
if isinstance(stop_reason, str):
|
|
124
|
+
self.stop_reason = stop_reason
|
|
125
|
+
|
|
126
|
+
usage = event_data.get("usage")
|
|
127
|
+
if isinstance(usage, dict):
|
|
128
|
+
self._merge_usage(usage)
|
|
129
|
+
|
|
130
|
+
elif event_name == "message_stop":
|
|
131
|
+
if (
|
|
132
|
+
isinstance(event_data, dict)
|
|
133
|
+
and event_data.get("type") == "message_stop"
|
|
134
|
+
):
|
|
135
|
+
# No additional fields required, but keep hook for completeness.
|
|
136
|
+
pass
|
|
137
|
+
|
|
138
|
+
if event_name == "content_block_start":
|
|
139
|
+
if (
|
|
140
|
+
isinstance(event_data, dict)
|
|
141
|
+
and event_data.get("type") == "content_block_start"
|
|
142
|
+
):
|
|
143
|
+
block = event_data.get("content_block", {})
|
|
144
|
+
if not isinstance(block, dict):
|
|
145
|
+
return
|
|
146
|
+
|
|
147
|
+
index_value = str(event_data.get("index", 0))
|
|
148
|
+
block_id = block.get("id") or f"block_{index_value}_{len(self.tools)}"
|
|
149
|
+
self._index_to_key[index_value] = block_id
|
|
150
|
+
|
|
151
|
+
# Store block based on its type
|
|
152
|
+
block_type = block.get("type", "")
|
|
153
|
+
|
|
154
|
+
if block_type == "tool_use":
|
|
155
|
+
input_payload = block.get("input")
|
|
156
|
+
order = len(self.tools)
|
|
157
|
+
self.tools[block_id] = {
|
|
158
|
+
"id": block.get("id"),
|
|
159
|
+
"name": block.get("name"),
|
|
160
|
+
"input": input_payload
|
|
161
|
+
if isinstance(input_payload, dict)
|
|
162
|
+
else {},
|
|
163
|
+
"partial_json": "",
|
|
164
|
+
"index": order,
|
|
165
|
+
"order": order,
|
|
166
|
+
"type": "tool_use",
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
# Save all content blocks for rebuilding the full response
|
|
170
|
+
self.content_block_map[block_id] = {
|
|
171
|
+
"id": block.get("id", block_id),
|
|
172
|
+
"type": block_type,
|
|
173
|
+
"index": int(index_value),
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
# Add type-specific fields
|
|
177
|
+
if block_type == "text":
|
|
178
|
+
self.content_block_map[block_id]["text"] = ""
|
|
179
|
+
elif block_type == "tool_use":
|
|
180
|
+
self.content_block_map[block_id]["name"] = block.get("name")
|
|
181
|
+
self.content_block_map[block_id]["input"] = block.get("input", {})
|
|
182
|
+
elif block_type == "thinking":
|
|
183
|
+
self.content_block_map[block_id]["thinking"] = ""
|
|
184
|
+
signature = block.get("signature")
|
|
185
|
+
if isinstance(signature, str) and signature:
|
|
186
|
+
self.content_block_map[block_id]["signature"] = signature
|
|
187
|
+
|
|
188
|
+
# Set current content block for delta updates
|
|
189
|
+
self.current_content_block = (
|
|
190
|
+
str(block_id) if block_id is not None else None
|
|
191
|
+
)
|
|
192
|
+
|
|
193
|
+
elif event_name == "content_block_delta":
|
|
194
|
+
if (
|
|
195
|
+
isinstance(event_data, dict)
|
|
196
|
+
and event_data.get("type") == "content_block_delta"
|
|
197
|
+
):
|
|
198
|
+
index_value = str(event_data.get("index", 0))
|
|
199
|
+
block_id = self._index_to_key.get(index_value)
|
|
200
|
+
delta = event_data.get("delta", {})
|
|
201
|
+
|
|
202
|
+
if block_id and isinstance(delta, dict):
|
|
203
|
+
# For tool use blocks
|
|
204
|
+
if (
|
|
205
|
+
delta.get("type") == "input_json_delta"
|
|
206
|
+
and block_id in self.tools
|
|
207
|
+
):
|
|
208
|
+
self.tools[block_id]["partial_json"] += delta.get(
|
|
209
|
+
"partial_json", ""
|
|
210
|
+
)
|
|
211
|
+
|
|
212
|
+
# For text blocks
|
|
213
|
+
elif (
|
|
214
|
+
delta.get("type") in {"text_delta", "text"}
|
|
215
|
+
and block_id in self.content_block_map
|
|
216
|
+
):
|
|
217
|
+
block = self.content_block_map[block_id]
|
|
218
|
+
if block.get("type") == "text":
|
|
219
|
+
block["text"] = block.get("text", "") + delta.get(
|
|
220
|
+
"text", ""
|
|
221
|
+
)
|
|
222
|
+
self.text_content += delta.get("text", "")
|
|
223
|
+
|
|
224
|
+
# For thinking blocks
|
|
225
|
+
elif (
|
|
226
|
+
delta.get("type") in {"thinking_delta", "thinking"}
|
|
227
|
+
and block_id in self.content_block_map
|
|
228
|
+
):
|
|
229
|
+
block = self.content_block_map[block_id]
|
|
230
|
+
if block.get("type") == "thinking":
|
|
231
|
+
block["thinking"] = block.get("thinking", "") + delta.get(
|
|
232
|
+
"thinking", ""
|
|
233
|
+
)
|
|
234
|
+
|
|
235
|
+
elif event_name == "content_block_stop":
|
|
236
|
+
if (
|
|
237
|
+
isinstance(event_data, dict)
|
|
238
|
+
and event_data.get("type") == "content_block_stop"
|
|
239
|
+
):
|
|
240
|
+
index_value = str(event_data.get("index", 0))
|
|
241
|
+
block_id = self._index_to_key.get(index_value)
|
|
242
|
+
|
|
243
|
+
# Finalize tool use blocks by parsing JSON
|
|
244
|
+
if block_id in self.tools and self.tools[block_id]["partial_json"]:
|
|
245
|
+
try:
|
|
246
|
+
payload = self.tools[block_id]["partial_json"]
|
|
247
|
+
self.tools[block_id]["input"] = json.loads(payload)
|
|
248
|
+
|
|
249
|
+
# Also update in content block map
|
|
250
|
+
if block_id in self.content_block_map:
|
|
251
|
+
self.content_block_map[block_id]["input"] = json.loads(
|
|
252
|
+
payload
|
|
253
|
+
)
|
|
254
|
+
except json.JSONDecodeError as exc:
|
|
255
|
+
logger.warning(
|
|
256
|
+
"claude_tool_json_decode_failed",
|
|
257
|
+
error=str(exc),
|
|
258
|
+
raw=self.tools[block_id]["partial_json"],
|
|
259
|
+
)
|
|
260
|
+
|
|
261
|
+
# Finalize the current content block and add to ordered list
|
|
262
|
+
if block_id in self.content_block_map:
|
|
263
|
+
block = self.content_block_map[block_id]
|
|
264
|
+
if block not in self.content_blocks:
|
|
265
|
+
self.content_blocks.append(block)
|
|
266
|
+
|
|
267
|
+
def get_complete_tool_calls(self) -> list[dict[str, Any]]:
    """Return every tracked tool call whose JSON input has been fully parsed.

    Entries whose ``input`` is still ``None`` (accumulating or failed to
    parse) are omitted.

    Returns:
        List of complete tool calls in both Anthropic-style (``name``/``input``)
        and OpenAI-style (``function.name``/``function.arguments``) shape.
    """
    finished: list[dict[str, Any]] = []

    for data in self.tools.values():
        parsed_input = data.get("input")
        if parsed_input is None:
            continue

        tool_name = data.get("name")
        finished.append(
            {
                "id": data.get("id"),
                "type": "function",
                "name": tool_name,
                "input": parsed_input,
                "function": {
                    "name": tool_name,
                    "arguments": json.dumps(parsed_input, ensure_ascii=False),
                },
                "index": data.get("index"),
                "order": data.get("order"),
            }
        )

    return finished
297
|
+
|
|
298
|
+
def rebuild_response_object(self, response: dict[str, Any]) -> dict[str, Any]:
    """Rebuild the complete Claude response with all accumulated content.

    Args:
        response: Original Claude response; used only to fill fields that
            were never captured from the stream (id, model, stop_reason).

    Returns:
        Rebuilt response with complete content
    """
    blocks: list[dict[str, Any]] = []
    # Emit blocks in stream order (by their recorded index).
    for block in sorted(self.content_blocks, key=lambda b: b.get("index", 0)):
        kind = block.get("type")
        if kind == "text":
            blocks.append({"type": "text", "text": block.get("text", "")})
        elif kind == "tool_use":
            candidate = {
                "type": "tool_use",
                "id": block.get("id"),
                "name": block.get("name"),
                "input": block.get("input", {}),
            }
            # Drop unset/empty identification fields from the emitted block.
            blocks.append(
                {key: val for key, val in candidate.items() if val not in (None, "")}
            )
        elif kind == "thinking":
            blocks.append(
                {
                    "type": "thinking",
                    "thinking": block.get("thinking", ""),
                    "signature": block.get("signature", ""),
                }
            )

    # cache_read_input_tokens always present in the payload, defaulting to 0.
    usage_payload = {
        "input_tokens": int(self._usage.get("input_tokens", 0)),
        "output_tokens": int(self._usage.get("output_tokens", 0)),
        "cache_read_input_tokens": int(self._usage.get("cache_read_input_tokens", 0)),
    }

    rebuilt: dict[str, Any] = {
        "id": self.message_metadata.get("id") or response.get("id"),
        "type": self.message_metadata.get("type", "message"),
        "role": self.message_metadata.get("role", "assistant"),
        "content": blocks,
        "model": self.message_metadata.get("model") or response.get("model"),
        "stop_reason": self.stop_reason or response.get("stop_reason"),
        "usage": usage_payload,
    }

    if self.text_content:
        rebuilt["text"] = self.text_content
    return rebuilt
|
363
|
+
|
|
364
|
+
def get_block_info(self, index: int) -> tuple[str, dict[str, Any]] | None:
|
|
365
|
+
"""Return (block_id, block_data) for a content block index."""
|
|
366
|
+
|
|
367
|
+
if index < 0:
|
|
368
|
+
return None
|
|
369
|
+
|
|
370
|
+
block_id = self._index_to_key.get(str(index))
|
|
371
|
+
if not block_id:
|
|
372
|
+
return None
|
|
373
|
+
|
|
374
|
+
block = self.content_block_map.get(block_id)
|
|
375
|
+
if block is None:
|
|
376
|
+
return None
|
|
377
|
+
|
|
378
|
+
return block_id, block
|
|
379
|
+
|
|
380
|
+
def get_tool_entry(
|
|
381
|
+
self,
|
|
382
|
+
identifier: int | str,
|
|
383
|
+
) -> dict[str, Any] | None:
|
|
384
|
+
"""Fetch the tool metadata tracked by the accumulator.
|
|
385
|
+
|
|
386
|
+
Args:
|
|
387
|
+
identifier: Either the integer index from the stream event or the
|
|
388
|
+
underlying block identifier tracked by the accumulator.
|
|
389
|
+
|
|
390
|
+
Returns:
|
|
391
|
+
The tracked tool entry if present.
|
|
392
|
+
"""
|
|
393
|
+
|
|
394
|
+
block_id: str | None
|
|
395
|
+
if isinstance(identifier, int):
|
|
396
|
+
info = self.get_block_info(identifier)
|
|
397
|
+
block_id = info[0] if info else None
|
|
398
|
+
else:
|
|
399
|
+
block_id = identifier
|
|
400
|
+
|
|
401
|
+
if not block_id:
|
|
402
|
+
return None
|
|
403
|
+
|
|
404
|
+
return self.tools.get(block_id)
|
|
405
|
+
|
|
406
|
+
def _merge_usage(self, usage: dict[str, Any]) -> None:
|
|
407
|
+
for key, value in usage.items():
|
|
408
|
+
if isinstance(value, int | float):
|
|
409
|
+
self._usage[key] = int(value)
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
class OpenAIAccumulator(StreamAccumulator):
    """Accumulate tool calls emitted via OpenAI chat/completion deltas."""

    def __init__(self) -> None:
        super().__init__()
        # Track the most recent entry key per choice index so anonymous deltas
        # append to the correct in-flight tool call instead of creating a new slot.
        self._index_to_key: dict[str, str] = {}
        # Per-choice accumulated state ({"index", "message", "finish_reason"}),
        # keyed by the choice's "index" field.
        self.choices: dict[int, dict[str, Any]] = {}
        # Concatenated message text per choice index.
        self.message_content: dict[int, str] = {}

    def accumulate(self, event_name: str, event_data: dict[str, Any]) -> None:
        """Accumulate OpenAI streaming events.

        Events that are not dicts or lack a "choices" list are ignored.

        Args:
            event_name: Name of the event
            event_data: Data associated with the event
        """
        if not isinstance(event_data, dict) or "choices" not in event_data:
            return

        for choice in event_data.get("choices", []):
            if not isinstance(choice, dict):
                continue

            # Track choice index
            choice_index = choice.get("index", 0)

            # Initialize choice if not already tracked
            if choice_index not in self.choices:
                self.choices[choice_index] = {
                    "index": choice_index,
                    "message": {"role": "assistant", "content": ""},
                    "finish_reason": None,
                }
                self.message_content[choice_index] = ""

            # Update finish reason if provided
            if "finish_reason" in choice:
                self.choices[choice_index]["finish_reason"] = choice["finish_reason"]

            # Update message content if provided
            delta = choice.get("delta", {})
            if not isinstance(delta, dict):
                continue

            # Update message role if provided
            if "role" in delta:
                self.choices[choice_index]["message"]["role"] = delta["role"]

            # Update message content if provided
            if "content" in delta and delta["content"] is not None:
                content = delta["content"]
                self.message_content[choice_index] += content
                self.choices[choice_index]["message"]["content"] += content
                self.text_content += content

            # Process tool calls
            if "tool_calls" not in delta:
                continue

            for tool_call in delta.get("tool_calls", []) or []:
                if not isinstance(tool_call, dict):
                    continue

                index = int(tool_call.get("index", 0))
                index_key = str(index)

                # Entry key selection: prefer the explicit tool id; otherwise
                # reuse the key already bound to this index; else synthesize one.
                previous_key = self._index_to_key.get(index_key)
                tool_id = tool_call.get("id")
                if isinstance(tool_id, str) and tool_id:
                    key = tool_id
                else:
                    key = previous_key or f"call_{index}"

                self._index_to_key[index_key] = key

                # If the index was previously tracked under a different key
                # (the real id arrived late), move the accumulated entry over.
                migrated_entry = None
                if previous_key and previous_key != key:
                    migrated_entry = self.tools.pop(previous_key, None)

                entry = self.tools.get(key)
                if entry is None:
                    if migrated_entry is not None:
                        entry = migrated_entry
                    else:
                        entry = {
                            "id": None,
                            "type": None,
                            "function": {"name": None, "arguments": ""},
                            "index": index,
                            "order": len(self.tools),
                        }
                    self.tools[key] = entry

                # Defensive: guarantee the nested structure exists before use.
                entry.setdefault("function", {"name": None, "arguments": ""})
                entry.setdefault("order", len(self.tools))
                entry["index"] = index

                if isinstance(tool_id, str) and tool_id:
                    entry["id"] = tool_id
                elif not entry.get("id"):
                    # No explicit id seen yet; fall back to the entry key.
                    entry["id"] = key

                if "type" in tool_call:
                    entry["type"] = tool_call["type"]

                function = tool_call.get("function", {})
                if isinstance(function, dict):
                    if "name" in function:
                        name_value = function["name"]
                        if name_value:
                            entry["function"]["name"] = name_value
                    if "arguments" in function:
                        # Argument JSON arrives in fragments; concatenate.
                        entry["function"]["arguments"] += function["arguments"]

    def get_complete_tool_calls(self) -> list[dict[str, Any]]:
        """Get complete tool calls accumulated so far.

        A call is considered complete once it has a non-empty arguments string.

        Returns:
            List of complete tool calls
        """
        complete: list[dict[str, Any]] = []

        for call_data in self.tools.values():
            arguments = call_data["function"].get("arguments")
            if not arguments:
                continue

            complete.append(
                {
                    "id": call_data.get("id"),
                    "type": call_data.get("type"),
                    "index": call_data.get("index"),
                    "order": call_data.get("order"),
                    "function": {
                        "name": call_data["function"].get("name"),
                        "arguments": arguments,
                    },
                }
            )

        return complete

    def rebuild_response_object(self, response: dict[str, Any]) -> dict[str, Any]:
        """Rebuild the complete OpenAI response with all accumulated content.

        Args:
            response: Original OpenAI response

        Returns:
            Rebuilt response with complete content
        """
        # Create a copy of the original response
        rebuilt = dict(response)

        # Rebuild choices with accumulated data
        if self.choices:
            # Convert choices dict to list and sort by index
            choice_list = list(self.choices.values())
            choice_list.sort(key=lambda x: x.get("index", 0))

            # Update choices in the response
            rebuilt["choices"] = choice_list

        # Update messages with tool calls
        tool_calls = self.get_complete_tool_calls()
        if tool_calls:
            # Add tool calls to each choice's message
            # NOTE(review): rebuilt["choices"] is read unconditionally here;
            # assumes that when tool calls exist, "choices" is present either
            # from accumulation or the original response — confirm.
            for choice in rebuilt["choices"]:
                if "message" in choice:
                    choice["message"]["tool_calls"] = tool_calls

        return rebuilt
|
586
|
+
|
|
587
|
+
|
|
588
|
+
class ResponsesAccumulator(StreamAccumulator):
|
|
589
|
+
"""Accumulate events emitted by the OpenAI Responses API using typed models."""
|
|
590
|
+
|
|
591
|
+
def __init__(self) -> None:
    super().__init__()
    # Output items keyed by item id, stored as typed models.
    self._items: dict[str, openai_models.OutputItem] = {}
    # Maps stream output_index -> item id for ordered reconstruction.
    self._items_by_index: dict[int, str] = {}
    # Output text delta fragments keyed by (item_id, content_index).
    self._text_fragments: dict[tuple[str, int], list[str]] = {}
    # Reasoning summary parts keyed by item id, then summary index.
    self._reasoning_summary: dict[
        str, dict[int, openai_models.ReasoningSummaryPart]
    ] = {}
    # Reasoning summary text fragments keyed by (item_id, summary_index).
    self._reasoning_text: dict[tuple[str, int], list[str]] = {}
    # Function-call argument fragments keyed by item id.
    self._function_arguments: dict[str, list[str]] = {}
    # Most recent response snapshot from created/in_progress events.
    self._latest_response: openai_models.ResponseObject | None = None
    # Final response captured from the completed event, if seen.
    self.completed_response: openai_models.ResponseObject | None = None
    # Highest sequence_number observed; also used to synthesize missing ones.
    self._sequence_counter = 0
|
604
|
+
|
|
605
|
+
def accumulate(
    self,
    event_name: str,
    event_data: dict[str, Any] | openai_models.BaseStreamEvent,
) -> None:
    """Accumulate Responses API streaming events.

    The raw event is first normalized into a typed model by
    ``_coerce_stream_event``; events that fail validation are dropped there.
    Dispatch is then by concrete event class, updating the tracked
    response/items/text/reasoning state.
    """

    event = self._coerce_stream_event(event_name, event_data)
    if event is None:
        # Unknown or invalid event; nothing to record.
        return

    # Response lifecycle: keep the latest snapshot; the completed response
    # is stored separately so it wins during rebuild.
    if isinstance(event, openai_models.ResponseCreatedEvent):
        self._latest_response = event.response
        return

    if isinstance(event, openai_models.ResponseInProgressEvent):
        self._latest_response = event.response
        return

    if isinstance(event, openai_models.ResponseCompletedEvent):
        self.completed_response = event.response
        return

    # Output item lifecycle: record on add, merge fields on done.
    if isinstance(event, openai_models.ResponseOutputItemAddedEvent):
        self._record_output_item(event.output_index, event.item)
        return

    if isinstance(event, openai_models.ResponseOutputItemDoneEvent):
        self._merge_output_item(event.output_index, event.item)
        return

    # Output text: accumulate deltas, then finalize with the done text.
    if isinstance(event, openai_models.ResponseOutputTextDeltaEvent):
        self._accumulate_text_delta(
            item_id=event.item_id,
            content_index=event.content_index,
            delta=event.delta,
        )
        return

    if isinstance(event, openai_models.ResponseOutputTextDoneEvent):
        self._finalize_text(
            item_id=event.item_id,
            content_index=event.content_index,
            text=event.text,
        )
        return

    # Function-call arguments: same delta/done pattern as text.
    if isinstance(event, openai_models.ResponseFunctionCallArgumentsDeltaEvent):
        self._accumulate_function_arguments(event.item_id, event.delta)
        return

    if isinstance(event, openai_models.ResponseFunctionCallArgumentsDoneEvent):
        self._finalize_function_arguments(event.item_id, event.arguments)
        return

    # Reasoning summary parts: added and done both store/overwrite the part.
    if isinstance(event, openai_models.ReasoningSummaryPartAddedEvent):
        self._store_reasoning_summary_part(
            item_id=event.item_id,
            summary_index=event.summary_index,
            part=event.part,
        )
        return

    if isinstance(event, openai_models.ReasoningSummaryPartDoneEvent):
        self._store_reasoning_summary_part(
            item_id=event.item_id,
            summary_index=event.summary_index,
            part=event.part,
        )
        return

    # Reasoning summary text: delta/done pattern.
    if isinstance(event, openai_models.ReasoningSummaryTextDeltaEvent):
        self._accumulate_reasoning_text(
            item_id=event.item_id,
            summary_index=event.summary_index,
            delta=event.delta,
        )
        return

    if isinstance(event, openai_models.ReasoningSummaryTextDoneEvent):
        self._finalize_reasoning_text(
            item_id=event.item_id,
            summary_index=event.summary_index,
            text=event.text,
        )
        return
|
691
|
+
|
|
692
|
+
def get_complete_tool_calls(self) -> list[dict[str, Any]]:
    """Return function-call items that finished with both a name and arguments.

    Items still in a non-completed status, or missing a name or argument
    string, are skipped.
    """

    calls: list[dict[str, Any]] = []
    for item in self._items.values():
        if item.type != "function_call":
            continue
        if item.status and item.status != "completed":
            continue
        arguments = self._get_function_arguments(item.id)
        if not arguments or not item.name:
            continue

        calls.append(
            {
                "id": item.id,
                "type": "function_call",
                "call_id": item.call_id,
                "function": {
                    "name": item.name,
                    "arguments": arguments,
                },
            }
        )

    return calls
|
718
|
+
|
|
719
|
+
def rebuild_response_object(self, response: dict[str, Any]) -> dict[str, Any]:
    """Rebuild a complete Responses API payload with accumulated data.

    Preference order for the base payload: the completed response from the
    stream, then the latest in-progress snapshot, then the caller-provided
    ``response`` dict; a minimal ResponseObject is synthesized as a last
    resort.
    """

    base_response = self.completed_response or self._latest_response
    response_model = self._coerce_response_object(base_response or response)
    if response_model is None:
        # Could not normalize anything; build a bare-bones response from
        # whatever scalar fields the raw dict carries.
        response_model = openai_models.ResponseObject(
            id=str(response.get("id", "response")),
            created_at=int(response.get("created_at", 0)),
            status=str(response.get("status", "completed")),
            model=str(response.get("model", "")),
            output=[],
            parallel_tool_calls=bool(response.get("parallel_tool_calls", False)),
        )

    # Replace the output list with the accumulated items, when any exist.
    outputs = self._build_outputs()
    if outputs:
        response_model = response_model.model_copy(update={"output": outputs})

    function_calls = self.get_complete_tool_calls()
    reasoning_summary = self._build_reasoning_summary()

    payload = response_model.model_dump()

    if function_calls:
        payload["tool_calls"] = function_calls

    # Fallback: if no reasoning summary was accumulated from events, scrape
    # summaries already embedded in the reasoning output entries.
    if not reasoning_summary:
        fallback_summary: list[dict[str, Any]] = []
        for output_entry in payload.get("output", []):
            if not isinstance(output_entry, dict):
                continue
            if output_entry.get("type") != "reasoning":
                continue
            summary_list = output_entry.get("summary")
            if isinstance(summary_list, list):
                for part in summary_list:
                    if isinstance(part, dict):
                        fallback_summary.append(part)
        if fallback_summary:
            reasoning_summary = fallback_summary

    if reasoning_summary:
        reasoning_obj = payload.get("reasoning") or {}
        reasoning_obj["summary"] = reasoning_summary
        payload["reasoning"] = reasoning_obj

    if self.text_content:
        payload["text"] = self.text_content

    return payload
|
770
|
+
|
|
771
|
+
def get_completed_response(self) -> dict[str, Any] | None:
    """Return the final response payload captured from the stream, if any."""

    final = self.completed_response
    if not isinstance(final, openai_models.ResponseObject):
        return None
    return final.model_dump()
|
777
|
+
|
|
778
|
+
def _coerce_stream_event(
    self,
    event_name: str,
    event_data: dict[str, Any] | openai_models.BaseStreamEvent,
) -> openai_models.BaseStreamEvent | openai_models.ErrorEvent | None:
    """Normalize raw event data into a typed stream event.

    Already-typed events pass straight through. Dict payloads get a default
    "type" (the event name) and a synthesized sequence_number when missing,
    then are validated via the stream-event TypeAdapter. Returns None for
    anything that cannot be validated.
    """
    if isinstance(event_data, openai_models.BaseStreamEvent):
        # Update sequence counter for events that have sequence_number
        self._sequence_counter = max(
            self._sequence_counter, event_data.sequence_number
        )
        return event_data
    # Special handling for ErrorEvent which doesn't inherit from BaseStreamEvent
    elif isinstance(event_data, openai_models.ErrorEvent):
        return event_data

    if not isinstance(event_data, dict):
        return None

    # Work on a copy so the caller's dict is never mutated.
    payload = dict(event_data)
    payload.setdefault("type", event_name)
    if "sequence_number" not in payload:
        # Synthesize a monotonically increasing sequence number.
        self._sequence_counter += 1
        payload["sequence_number"] = self._sequence_counter

    try:
        wrapper = _RESPONSES_STREAM_EVENT_ADAPTER.validate_python(payload)
    except ValidationError as exc:
        # Invalid events are logged at debug level and dropped.
        logger.debug(
            "responses_accumulator_invalid_event",
            event_type=event_name,
            error=str(exc),
        )
        return None

    event = wrapper.root
    # Only update sequence counter if the event has sequence_number
    # ErrorEvent doesn't inherit from BaseStreamEvent and lacks this attribute
    if hasattr(event, "sequence_number"):
        self._sequence_counter = max(self._sequence_counter, event.sequence_number)
    return event
|
818
|
+
|
|
819
|
+
def _record_output_item(
    self, output_index: int, item: openai_models.OutputItem
) -> None:
    """Register a newly announced output item and bind its stream index."""
    item_id = item.id
    self._items[item_id] = item
    self._items_by_index[output_index] = item_id
    # An item that already carries text becomes the current text snapshot.
    if item.text:
        self.text_content = item.text
|
826
|
+
|
|
827
|
+
def _merge_output_item(
    self, output_index: int, item: openai_models.OutputItem
) -> None:
    """Fold a done-event item into the tracked copy, keeping earlier fields."""
    previous = self._items.get(item.id)
    if previous is None:
        merged = item
    else:
        # Overlay only the fields the done event actually set.
        merged = previous.model_copy(update=item.model_dump(exclude_unset=True))
    self._items[item.id] = merged
    self._items_by_index[output_index] = item.id
    if merged.text:
        self.text_content = merged.text
|
839
|
+
|
|
840
|
+
def _accumulate_text_delta(
|
|
841
|
+
self, *, item_id: str, content_index: int, delta: str
|
|
842
|
+
) -> None:
|
|
843
|
+
key = (item_id, content_index)
|
|
844
|
+
fragments = self._text_fragments.setdefault(key, [])
|
|
845
|
+
fragments.append(delta)
|
|
846
|
+
combined = "".join(fragments)
|
|
847
|
+
self._update_output_item_text(item_id, combined)
|
|
848
|
+
|
|
849
|
+
def _finalize_text(self, *, item_id: str, content_index: int, text: str) -> None:
|
|
850
|
+
key = (item_id, content_index)
|
|
851
|
+
fragments = self._text_fragments.get(key, [])
|
|
852
|
+
final_text = text or "".join(fragments)
|
|
853
|
+
self._update_output_item_text(item_id, final_text)
|
|
854
|
+
|
|
855
|
+
def _update_output_item_text(self, item_id: str, text: str) -> None:
|
|
856
|
+
item = self._items.get(item_id)
|
|
857
|
+
if item is None:
|
|
858
|
+
return
|
|
859
|
+
updated = item.model_copy(update={"text": text})
|
|
860
|
+
self._items[item_id] = updated
|
|
861
|
+
self.text_content = text
|
|
862
|
+
|
|
863
|
+
def _accumulate_function_arguments(self, item_id: str, delta: str) -> None:
|
|
864
|
+
args = self._function_arguments.setdefault(item_id, [])
|
|
865
|
+
args.append(delta)
|
|
866
|
+
combined = "".join(args)
|
|
867
|
+
self._update_output_item_arguments(item_id, combined)
|
|
868
|
+
|
|
869
|
+
def _finalize_function_arguments(self, item_id: str, arguments: str) -> None:
|
|
870
|
+
if arguments:
|
|
871
|
+
self._function_arguments[item_id] = [arguments]
|
|
872
|
+
self._update_output_item_arguments(item_id, arguments)
|
|
873
|
+
|
|
874
|
+
def _update_output_item_arguments(self, item_id: str, arguments: str) -> None:
|
|
875
|
+
item = self._items.get(item_id)
|
|
876
|
+
if item is None:
|
|
877
|
+
return
|
|
878
|
+
updated = item.model_copy(
|
|
879
|
+
update={"arguments": arguments, "status": item.status or "completed"}
|
|
880
|
+
)
|
|
881
|
+
self._items[item_id] = updated
|
|
882
|
+
|
|
883
|
+
def _store_reasoning_summary_part(
    self,
    *,
    item_id: str,
    summary_index: int,
    part: openai_models.ReasoningSummaryPart,
) -> None:
    """Store (or overwrite) one summary part for a reasoning item."""
    self._reasoning_summary.setdefault(item_id, {})[summary_index] = part
|
892
|
+
|
|
893
|
+
def _accumulate_reasoning_text(
|
|
894
|
+
self, *, item_id: str, summary_index: int, delta: str
|
|
895
|
+
) -> None:
|
|
896
|
+
key = (item_id, summary_index)
|
|
897
|
+
fragments = self._reasoning_text.setdefault(key, [])
|
|
898
|
+
fragments.append(delta)
|
|
899
|
+
text_value = "".join(fragments)
|
|
900
|
+
part = self._reasoning_summary.setdefault(item_id, {}).get(summary_index)
|
|
901
|
+
if part is not None:
|
|
902
|
+
self._reasoning_summary[item_id][summary_index] = part.model_copy(
|
|
903
|
+
update={"text": text_value}
|
|
904
|
+
)
|
|
905
|
+
else:
|
|
906
|
+
self._reasoning_summary.setdefault(item_id, {})[summary_index] = (
|
|
907
|
+
openai_models.ReasoningSummaryPart(type="summary_text", text=text_value)
|
|
908
|
+
)
|
|
909
|
+
|
|
910
|
+
def _finalize_reasoning_text(
|
|
911
|
+
self, *, item_id: str, summary_index: int, text: str
|
|
912
|
+
) -> None:
|
|
913
|
+
final_text = text or "".join(
|
|
914
|
+
self._reasoning_text.get((item_id, summary_index), [])
|
|
915
|
+
)
|
|
916
|
+
part = self._reasoning_summary.setdefault(item_id, {}).get(summary_index)
|
|
917
|
+
if part is not None:
|
|
918
|
+
self._reasoning_summary[item_id][summary_index] = part.model_copy(
|
|
919
|
+
update={"text": final_text}
|
|
920
|
+
)
|
|
921
|
+
else:
|
|
922
|
+
self._reasoning_summary[item_id][summary_index] = (
|
|
923
|
+
openai_models.ReasoningSummaryPart(type="summary_text", text=final_text)
|
|
924
|
+
)
|
|
925
|
+
|
|
926
|
+
def _get_function_arguments(self, item_id: str) -> str | None:
|
|
927
|
+
explicit = self._items.get(item_id)
|
|
928
|
+
if explicit and explicit.arguments:
|
|
929
|
+
return explicit.arguments
|
|
930
|
+
fragments = self._function_arguments.get(item_id)
|
|
931
|
+
if not fragments:
|
|
932
|
+
return None
|
|
933
|
+
return "".join(fragments)
|
|
934
|
+
|
|
935
|
+
def _coerce_response_object(
    self, response: dict[str, Any] | openai_models.ResponseObject | None
) -> openai_models.ResponseObject | None:
    """Normalize a raw dict (or pass through a typed model) into a ResponseObject.

    Returns None when *response* is neither a dict nor a ResponseObject.
    On validation failure, falls back to a minimal ResponseObject built from
    the payload's basic scalar fields.
    """
    if isinstance(response, openai_models.ResponseObject):
        return response
    if not isinstance(response, dict):
        return None

    # Work on a copy and fill in the fields ResponseObject requires.
    payload = dict(response)
    payload.setdefault("object", "response")
    payload.setdefault("created_at", int(payload.get("created_at") or 0))
    payload.setdefault("status", payload.get("status") or "completed")
    payload.setdefault("model", payload.get("model") or "")
    # A single output dict is wrapped into a one-element list.
    if isinstance(payload.get("output"), dict):
        payload["output"] = [payload["output"]]
    payload.setdefault("output", payload.get("output") or [])
    payload.setdefault(
        "parallel_tool_calls", payload.get("parallel_tool_calls", False)
    )

    try:
        return _RESPONSE_OBJECT_ADAPTER.validate_python(payload)
    except ValidationError as exc:
        logger.debug(
            "responses_accumulator_response_normalization_failed",
            error=str(exc),
        )
        # Last-resort minimal object so downstream rebuild can still proceed.
        return openai_models.ResponseObject(
            id=str(payload.get("id") or "response"),
            created_at=int(payload.get("created_at") or 0),
            status=str(payload.get("status") or "completed"),
            model=str(payload.get("model") or ""),
            output=[],
            parallel_tool_calls=bool(payload.get("parallel_tool_calls") or False),
        )
|
970
|
+
|
|
971
|
+
def _build_outputs(
    self,
) -> list[
    openai_models.MessageOutput
    | openai_models.ReasoningOutput
    | openai_models.FunctionCallOutput
    | dict[str, Any]
]:
    """Build typed output entries from the tracked items, in stream order.

    Dispatches on the item type: function_call, reasoning, or (by default)
    message.
    """
    outputs: list[
        openai_models.MessageOutput
        | openai_models.ReasoningOutput
        | openai_models.FunctionCallOutput
        | dict[str, Any]
    ] = []

    # Iterate in ascending output_index so ordering matches the stream.
    for index in sorted(self._items_by_index):
        item_id = self._items_by_index[index]
        item = self._items.get(item_id)
        if item is None:
            continue

        if item.type == "function_call":
            outputs.append(
                openai_models.FunctionCallOutput(
                    type="function_call",
                    id=item.id,
                    status=item.status or "completed",
                    name=item.name,
                    call_id=item.call_id,
                    arguments=self._get_function_arguments(item.id),
                )
            )
            continue

        if item.type == "reasoning":
            # Prefer summary parts accumulated from events; fall back to any
            # summary already carried by the item itself.
            summary_map = self._reasoning_summary.get(item.id, {})
            summary_entries: list[dict[str, Any]] = []
            for key in sorted(summary_map):
                summary_part = summary_map[key]
                summary_entries.append(summary_part.model_dump())
            if not summary_entries and item.summary:
                for part in item.summary:
                    if hasattr(part, "model_dump"):
                        summary_entries.append(part.model_dump())
                    else:
                        summary_entries.append(part)
            outputs.append(
                openai_models.ReasoningOutput(
                    type="reasoning",
                    id=item.id,
                    status=item.status or "completed",
                    summary=summary_entries or item.summary,
                )
            )
            continue

        # Default: treat the item as a message. Prefer the item's own text,
        # then joined buffered fragments, then its existing content entries.
        text_value = item.text or self._combined_text(item.id)
        content_entries: list[Any] = []
        if text_value:
            content_entries.append(
                openai_models.OutputTextContent(type="output_text", text=text_value)
            )
        elif item.content:
            content_entries.extend(item.content)

        outputs.append(
            openai_models.MessageOutput(
                type="message",
                id=item.id,
                status=item.status or "completed",
                # Role normalization: None or anything outside
                # {"assistant", "user"} collapses to "assistant"; valid roles
                # pass through unchanged.
                role="assistant"
                if item.role is None or item.role not in ("assistant", "user")
                else ("assistant" if item.role != "user" else "user"),
                content=[
                    part.model_dump()
                    if isinstance(part, openai_models.OutputTextContent)
                    else part
                    for part in content_entries
                ],
            )
        )

    return outputs
|
1054
|
+
|
|
1055
|
+
def _combined_text(self, item_id: str) -> str | None:
|
|
1056
|
+
values: list[str] = []
|
|
1057
|
+
for (candidate_id, _), fragments in self._text_fragments.items():
|
|
1058
|
+
if candidate_id == item_id:
|
|
1059
|
+
values.extend(fragments)
|
|
1060
|
+
if values:
|
|
1061
|
+
return "".join(values)
|
|
1062
|
+
return None
|
|
1063
|
+
|
|
1064
|
+
def _build_reasoning_summary(self) -> list[dict[str, Any]]:
|
|
1065
|
+
summary: list[dict[str, Any]] = []
|
|
1066
|
+
for item_id, parts in self._reasoning_summary.items():
|
|
1067
|
+
item = self._items.get(item_id)
|
|
1068
|
+
status = item.status if item else "completed"
|
|
1069
|
+
for key in sorted(parts):
|
|
1070
|
+
part = parts[key]
|
|
1071
|
+
entry = part.model_dump()
|
|
1072
|
+
entry.setdefault("status", status)
|
|
1073
|
+
summary.append(entry)
|
|
1074
|
+
return summary
|