@vybestack/llxprt-code-core 0.1.12 → 0.1.13-hotfix1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/adapters/IStreamAdapter.d.ts +18 -0
- package/dist/src/adapters/IStreamAdapter.js +7 -0
- package/dist/src/adapters/IStreamAdapter.js.map +1 -0
- package/dist/src/code_assist/oauth2.d.ts +1 -1
- package/dist/src/code_assist/oauth2.js +51 -29
- package/dist/src/code_assist/oauth2.js.map +1 -1
- package/dist/src/code_assist/oauth2.test.js +36 -7
- package/dist/src/code_assist/oauth2.test.js.map +1 -1
- package/dist/src/code_assist/server.test.js +10 -2
- package/dist/src/code_assist/server.test.js.map +1 -1
- package/dist/src/config/config.d.ts +28 -5
- package/dist/src/config/config.js +29 -4
- package/dist/src/config/config.js.map +1 -1
- package/dist/src/config/config.test.js +2 -3
- package/dist/src/config/config.test.js.map +1 -1
- package/dist/src/core/client.d.ts +4 -2
- package/dist/src/core/client.js +68 -7
- package/dist/src/core/client.js.map +1 -1
- package/dist/src/core/client.test.js +8 -0
- package/dist/src/core/client.test.js.map +1 -1
- package/dist/src/core/contentGenerator.d.ts +3 -2
- package/dist/src/core/contentGenerator.js +6 -8
- package/dist/src/core/contentGenerator.js.map +1 -1
- package/dist/src/core/contentGenerator.test.js +12 -5
- package/dist/src/core/contentGenerator.test.js.map +1 -1
- package/dist/src/core/coreToolScheduler.test.js +4 -2
- package/dist/src/core/coreToolScheduler.test.js.map +1 -1
- package/dist/src/core/geminiChat.js +50 -3
- package/dist/src/core/geminiChat.js.map +1 -1
- package/dist/src/core/modelCheck.d.ts +1 -1
- package/dist/src/core/modelCheck.js +10 -3
- package/dist/src/core/modelCheck.js.map +1 -1
- package/dist/src/core/nonInteractiveToolExecutor.test.js +3 -0
- package/dist/src/core/nonInteractiveToolExecutor.test.js.map +1 -1
- package/dist/src/core/prompts.d.ts +1 -1
- package/dist/src/core/prompts.js +14 -2
- package/dist/src/core/prompts.js.map +1 -1
- package/dist/src/core/turn.js +6 -0
- package/dist/src/core/turn.js.map +1 -1
- package/dist/src/index.d.ts +29 -1
- package/dist/src/index.js +33 -0
- package/dist/src/index.js.map +1 -1
- package/dist/src/mcp/oauth-provider.d.ts +142 -0
- package/dist/src/mcp/oauth-provider.js +446 -0
- package/dist/src/mcp/oauth-provider.js.map +1 -0
- package/dist/src/mcp/oauth-provider.test.js +520 -0
- package/dist/src/mcp/oauth-provider.test.js.map +1 -0
- package/dist/src/mcp/oauth-token-storage.d.ts +81 -0
- package/dist/src/mcp/oauth-token-storage.js +149 -0
- package/dist/src/mcp/oauth-token-storage.js.map +1 -0
- package/dist/src/mcp/oauth-token-storage.test.d.ts +6 -0
- package/dist/src/mcp/oauth-token-storage.test.js +205 -0
- package/dist/src/mcp/oauth-token-storage.test.js.map +1 -0
- package/dist/src/mcp/oauth-utils.d.ts +109 -0
- package/dist/src/mcp/oauth-utils.js +183 -0
- package/dist/src/mcp/oauth-utils.js.map +1 -0
- package/dist/src/mcp/oauth-utils.test.d.ts +6 -0
- package/dist/src/mcp/oauth-utils.test.js +144 -0
- package/dist/src/mcp/oauth-utils.test.js.map +1 -0
- package/dist/src/parsers/TextToolCallParser.d.ts +35 -0
- package/dist/src/parsers/TextToolCallParser.js +248 -0
- package/dist/src/parsers/TextToolCallParser.js.map +1 -0
- package/dist/src/parsers/TextToolCallParser.test.d.ts +1 -0
- package/dist/src/parsers/TextToolCallParser.test.js +225 -0
- package/dist/src/parsers/TextToolCallParser.test.js.map +1 -0
- package/dist/src/providers/ContentGeneratorRole.d.ts +14 -0
- package/dist/src/providers/ContentGeneratorRole.js +16 -0
- package/dist/src/providers/ContentGeneratorRole.js.map +1 -0
- package/dist/src/providers/IMessage.d.ts +38 -0
- package/dist/src/providers/IMessage.js +17 -0
- package/dist/src/providers/IMessage.js.map +1 -0
- package/dist/src/providers/IModel.d.ts +23 -0
- package/dist/src/providers/IModel.js +17 -0
- package/dist/src/providers/IModel.js.map +1 -0
- package/dist/src/providers/IProvider.d.ts +36 -0
- package/dist/src/providers/IProvider.js +17 -0
- package/dist/src/providers/IProvider.js.map +1 -0
- package/dist/src/providers/IProviderConfig.d.ts +31 -0
- package/dist/src/providers/IProviderConfig.js +7 -0
- package/dist/src/providers/IProviderConfig.js.map +1 -0
- package/dist/src/providers/IProviderManager.d.ts +53 -0
- package/dist/src/providers/IProviderManager.js +7 -0
- package/dist/src/providers/IProviderManager.js.map +1 -0
- package/dist/src/providers/ITool.d.ts +23 -0
- package/dist/src/providers/ITool.js +17 -0
- package/dist/src/providers/ITool.js.map +1 -0
- package/dist/src/providers/ProviderContentGenerator.d.ts +1 -1
- package/dist/src/providers/ProviderManager.d.ts +24 -0
- package/dist/src/providers/ProviderManager.gemini-switch.test.d.ts +6 -0
- package/dist/src/providers/ProviderManager.gemini-switch.test.js +57 -0
- package/dist/src/providers/ProviderManager.gemini-switch.test.js.map +1 -0
- package/dist/src/providers/ProviderManager.js +116 -0
- package/dist/src/providers/ProviderManager.js.map +1 -0
- package/dist/src/providers/ProviderManager.test.d.ts +6 -0
- package/dist/src/providers/ProviderManager.test.js +284 -0
- package/dist/src/providers/ProviderManager.test.js.map +1 -0
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.d.ts +2 -1
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.js +15 -2
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.js.map +1 -1
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.test.js +20 -0
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.test.js.map +1 -1
- package/dist/src/providers/anthropic/AnthropicProvider.d.ts +57 -0
- package/dist/src/providers/anthropic/AnthropicProvider.js +490 -0
- package/dist/src/providers/anthropic/AnthropicProvider.js.map +1 -0
- package/dist/src/providers/anthropic/AnthropicProvider.test.d.ts +1 -0
- package/dist/src/providers/anthropic/AnthropicProvider.test.js +486 -0
- package/dist/src/providers/anthropic/AnthropicProvider.test.js.map +1 -0
- package/dist/src/providers/errors.d.ts +13 -0
- package/dist/src/providers/errors.js +19 -0
- package/dist/src/providers/errors.js.map +1 -0
- package/dist/src/providers/gemini/GeminiProvider.d.ts +97 -0
- package/dist/src/providers/gemini/GeminiProvider.integration.test.d.ts +6 -0
- package/dist/src/providers/gemini/GeminiProvider.integration.test.js +90 -0
- package/dist/src/providers/gemini/GeminiProvider.integration.test.js.map +1 -0
- package/dist/src/providers/gemini/GeminiProvider.js +937 -0
- package/dist/src/providers/gemini/GeminiProvider.js.map +1 -0
- package/dist/src/providers/gemini/GeminiProvider.test.d.ts +6 -0
- package/dist/src/providers/gemini/GeminiProvider.test.js +136 -0
- package/dist/src/providers/gemini/GeminiProvider.test.js.map +1 -0
- package/dist/src/providers/integration/TEST_INSTRUCTIONS.md +197 -0
- package/dist/src/providers/integration/multi-provider.integration.test.d.ts +6 -0
- package/dist/src/providers/integration/multi-provider.integration.test.js +292 -0
- package/dist/src/providers/integration/multi-provider.integration.test.js.map +1 -0
- package/dist/src/providers/openai/ConversationCache.accumTokens.test.d.ts +1 -0
- package/dist/src/providers/openai/ConversationCache.accumTokens.test.js +97 -0
- package/dist/src/providers/openai/ConversationCache.accumTokens.test.js.map +1 -0
- package/dist/src/providers/openai/ConversationCache.d.ts +20 -0
- package/dist/src/providers/openai/ConversationCache.js +109 -0
- package/dist/src/providers/openai/ConversationCache.js.map +1 -0
- package/dist/src/providers/openai/ConversationCache.test.d.ts +1 -0
- package/dist/src/providers/openai/ConversationCache.test.js +113 -0
- package/dist/src/providers/openai/ConversationCache.test.js.map +1 -0
- package/dist/src/providers/openai/IChatGenerateParams.d.ts +11 -0
- package/dist/src/providers/openai/IChatGenerateParams.js +2 -0
- package/dist/src/providers/openai/IChatGenerateParams.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.callResponses.stateless.test.d.ts +1 -0
- package/dist/src/providers/openai/OpenAIProvider.callResponses.stateless.test.js +189 -0
- package/dist/src/providers/openai/OpenAIProvider.callResponses.stateless.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.d.ts +80 -0
- package/dist/src/providers/openai/OpenAIProvider.integration.test.d.ts +6 -0
- package/dist/src/providers/openai/OpenAIProvider.integration.test.js +125 -0
- package/dist/src/providers/openai/OpenAIProvider.integration.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.js +523 -0
- package/dist/src/providers/openai/OpenAIProvider.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.responses.test.d.ts +1 -0
- package/dist/src/providers/openai/OpenAIProvider.responses.test.js +326 -0
- package/dist/src/providers/openai/OpenAIProvider.responses.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.responsesIntegration.test.d.ts +1 -0
- package/dist/src/providers/openai/OpenAIProvider.responsesIntegration.test.js +213 -0
- package/dist/src/providers/openai/OpenAIProvider.responsesIntegration.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.shouldUseResponses.test.d.ts +1 -0
- package/dist/src/providers/openai/OpenAIProvider.shouldUseResponses.test.js +58 -0
- package/dist/src/providers/openai/OpenAIProvider.shouldUseResponses.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.stateful.integration.test.d.ts +6 -0
- package/dist/src/providers/openai/OpenAIProvider.stateful.integration.test.js +105 -0
- package/dist/src/providers/openai/OpenAIProvider.stateful.integration.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.switch.test.d.ts +1 -0
- package/dist/src/providers/openai/OpenAIProvider.switch.test.js +256 -0
- package/dist/src/providers/openai/OpenAIProvider.switch.test.js.map +1 -0
- package/dist/src/providers/openai/OpenAIProvider.test.d.ts +16 -0
- package/dist/src/providers/openai/OpenAIProvider.test.js +214 -0
- package/dist/src/providers/openai/OpenAIProvider.test.js.map +1 -0
- package/dist/src/providers/openai/RESPONSES_API_MODELS.d.ts +2 -0
- package/dist/src/providers/openai/RESPONSES_API_MODELS.js +14 -0
- package/dist/src/providers/openai/RESPONSES_API_MODELS.js.map +1 -0
- package/dist/src/providers/openai/ResponsesContextTrim.integration.test.d.ts +1 -0
- package/dist/src/providers/openai/ResponsesContextTrim.integration.test.js +210 -0
- package/dist/src/providers/openai/ResponsesContextTrim.integration.test.js.map +1 -0
- package/dist/src/providers/openai/__tests__/formatArrayResponse.test.d.ts +1 -0
- package/dist/src/providers/openai/__tests__/formatArrayResponse.test.js +65 -0
- package/dist/src/providers/openai/__tests__/formatArrayResponse.test.js.map +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.d.ts +73 -0
- package/dist/src/providers/openai/buildResponsesRequest.js +165 -0
- package/dist/src/providers/openai/buildResponsesRequest.js.map +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.stripToolCalls.test.d.ts +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.stripToolCalls.test.js +129 -0
- package/dist/src/providers/openai/buildResponsesRequest.stripToolCalls.test.js.map +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.test.d.ts +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.test.js +406 -0
- package/dist/src/providers/openai/buildResponsesRequest.test.js.map +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.undefined.test.d.ts +1 -0
- package/dist/src/providers/openai/buildResponsesRequest.undefined.test.js +50 -0
- package/dist/src/providers/openai/buildResponsesRequest.undefined.test.js.map +1 -0
- package/dist/src/providers/openai/docs/accessing-provider-info.md +172 -0
- package/dist/src/providers/openai/docs/params-mapping.md +91 -0
- package/dist/src/providers/openai/docs/responses-api-tool-calls.md +96 -0
- package/dist/src/providers/openai/estimateRemoteTokens.d.ts +26 -0
- package/dist/src/providers/openai/estimateRemoteTokens.js +75 -0
- package/dist/src/providers/openai/estimateRemoteTokens.js.map +1 -0
- package/dist/src/providers/openai/estimateRemoteTokens.test.d.ts +1 -0
- package/dist/src/providers/openai/estimateRemoteTokens.test.js +125 -0
- package/dist/src/providers/openai/estimateRemoteTokens.test.js.map +1 -0
- package/dist/src/providers/openai/getOpenAIProviderInfo.d.ts +46 -0
- package/dist/src/providers/openai/getOpenAIProviderInfo.js +75 -0
- package/dist/src/providers/openai/getOpenAIProviderInfo.js.map +1 -0
- package/dist/src/providers/openai/parseResponsesStream.d.ts +3 -0
- package/dist/src/providers/openai/parseResponsesStream.js +462 -0
- package/dist/src/providers/openai/parseResponsesStream.js.map +1 -0
- package/dist/src/providers/openai/parseResponsesStream.responsesToolCalls.test.d.ts +1 -0
- package/dist/src/providers/openai/parseResponsesStream.responsesToolCalls.test.js +192 -0
- package/dist/src/providers/openai/parseResponsesStream.responsesToolCalls.test.js.map +1 -0
- package/dist/src/providers/openai/parseResponsesStream.test.d.ts +1 -0
- package/dist/src/providers/openai/parseResponsesStream.test.js +151 -0
- package/dist/src/providers/openai/parseResponsesStream.test.js.map +1 -0
- package/dist/src/providers/tokenizers/AnthropicTokenizer.d.ts +19 -0
- package/dist/src/providers/tokenizers/AnthropicTokenizer.js +37 -0
- package/dist/src/providers/tokenizers/AnthropicTokenizer.js.map +1 -0
- package/dist/src/providers/tokenizers/ITokenizer.d.ts +18 -0
- package/dist/src/providers/tokenizers/ITokenizer.js +17 -0
- package/dist/src/providers/tokenizers/ITokenizer.js.map +1 -0
- package/dist/src/providers/tokenizers/OpenAITokenizer.d.ts +24 -0
- package/dist/src/providers/tokenizers/OpenAITokenizer.js +56 -0
- package/dist/src/providers/tokenizers/OpenAITokenizer.js.map +1 -0
- package/dist/src/providers/types/IProviderConfig.d.ts +102 -0
- package/dist/src/providers/types/IProviderConfig.js +17 -0
- package/dist/src/providers/types/IProviderConfig.js.map +1 -0
- package/dist/src/providers/types.d.ts +4 -69
- package/dist/src/services/ideContext.d.ts +2 -0
- package/dist/src/services/ideContext.js +8 -0
- package/dist/src/services/ideContext.js.map +1 -1
- package/dist/src/services/ideContext.test.js +10 -0
- package/dist/src/services/ideContext.test.js.map +1 -1
- package/dist/src/services/loopDetectionService.d.ts +17 -1
- package/dist/src/services/loopDetectionService.js +117 -2
- package/dist/src/services/loopDetectionService.js.map +1 -1
- package/dist/src/services/loopDetectionService.test.js +109 -2
- package/dist/src/services/loopDetectionService.test.js.map +1 -1
- package/dist/src/telemetry/clearcut-logger/clearcut-logger.d.ts +2 -0
- package/dist/src/telemetry/clearcut-logger/clearcut-logger.js +40 -2
- package/dist/src/telemetry/clearcut-logger/clearcut-logger.js.map +1 -1
- package/dist/src/telemetry/clearcut-logger/event-metadata-key.d.ts +2 -0
- package/dist/src/telemetry/clearcut-logger/event-metadata-key.js +4 -0
- package/dist/src/telemetry/clearcut-logger/event-metadata-key.js.map +1 -1
- package/dist/src/telemetry/sdk.js +0 -2
- package/dist/src/telemetry/sdk.js.map +1 -1
- package/dist/src/telemetry/types.d.ts +2 -1
- package/dist/src/telemetry/types.js +1 -0
- package/dist/src/telemetry/types.js.map +1 -1
- package/dist/src/telemetry/uiTelemetry.d.ts +1 -0
- package/dist/src/telemetry/uiTelemetry.js +7 -0
- package/dist/src/telemetry/uiTelemetry.js.map +1 -1
- package/dist/src/telemetry/uiTelemetry.test.js +92 -0
- package/dist/src/telemetry/uiTelemetry.test.js.map +1 -1
- package/dist/src/tools/IToolFormatter.d.ts +40 -0
- package/dist/src/tools/IToolFormatter.js +17 -0
- package/dist/src/tools/IToolFormatter.js.map +1 -0
- package/dist/src/tools/ToolFormatter.d.ts +45 -0
- package/dist/src/tools/ToolFormatter.js +216 -0
- package/dist/src/tools/ToolFormatter.js.map +1 -0
- package/dist/src/tools/ToolFormatter.test.d.ts +16 -0
- package/dist/src/tools/ToolFormatter.test.js +349 -0
- package/dist/src/tools/ToolFormatter.test.js.map +1 -0
- package/dist/src/tools/ToolFormatter.toResponsesTool.test.d.ts +1 -0
- package/dist/src/tools/ToolFormatter.toResponsesTool.test.js +241 -0
- package/dist/src/tools/ToolFormatter.toResponsesTool.test.js.map +1 -0
- package/dist/src/tools/edit.d.ts +7 -1
- package/dist/src/tools/edit.js +19 -7
- package/dist/src/tools/edit.js.map +1 -1
- package/dist/src/tools/glob.js +2 -2
- package/dist/src/tools/glob.js.map +1 -1
- package/dist/src/tools/grep.js +2 -2
- package/dist/src/tools/grep.js.map +1 -1
- package/dist/src/tools/ls.js +2 -2
- package/dist/src/tools/ls.js.map +1 -1
- package/dist/src/tools/mcp-client.d.ts +0 -2
- package/dist/src/tools/mcp-client.js +8 -20
- package/dist/src/tools/mcp-client.js.map +1 -1
- package/dist/src/tools/mcp-client.test.js +1 -72
- package/dist/src/tools/mcp-client.test.js.map +1 -1
- package/dist/src/tools/mcp-tool.d.ts +11 -5
- package/dist/src/tools/mcp-tool.js +33 -9
- package/dist/src/tools/mcp-tool.js.map +1 -1
- package/dist/src/tools/mcp-tool.test.js +40 -24
- package/dist/src/tools/mcp-tool.test.js.map +1 -1
- package/dist/src/tools/memoryTool.js +2 -2
- package/dist/src/tools/memoryTool.js.map +1 -1
- package/dist/src/tools/read-file.d.ts +2 -1
- package/dist/src/tools/read-file.js +5 -2
- package/dist/src/tools/read-file.js.map +1 -1
- package/dist/src/tools/read-many-files.js +2 -2
- package/dist/src/tools/read-many-files.js.map +1 -1
- package/dist/src/tools/shell.js +2 -2
- package/dist/src/tools/shell.js.map +1 -1
- package/dist/src/tools/todo-read.js +2 -2
- package/dist/src/tools/todo-read.js.map +1 -1
- package/dist/src/tools/todo-write.js +2 -2
- package/dist/src/tools/todo-write.js.map +1 -1
- package/dist/src/tools/tool-registry.d.ts +0 -1
- package/dist/src/tools/tool-registry.js +11 -8
- package/dist/src/tools/tool-registry.js.map +1 -1
- package/dist/src/tools/tool-registry.test.js +36 -10
- package/dist/src/tools/tool-registry.test.js.map +1 -1
- package/dist/src/tools/tools.d.ts +37 -2
- package/dist/src/tools/tools.js +25 -2
- package/dist/src/tools/tools.js.map +1 -1
- package/dist/src/tools/web-fetch.integration.test.d.ts +6 -0
- package/dist/src/tools/web-fetch.integration.test.js +532 -0
- package/dist/src/tools/web-fetch.integration.test.js.map +1 -0
- package/dist/src/tools/web-fetch.js +57 -50
- package/dist/src/tools/web-fetch.js.map +1 -1
- package/dist/src/tools/web-search.js +34 -6
- package/dist/src/tools/web-search.js.map +1 -1
- package/dist/src/tools/web-search.test.d.ts +6 -0
- package/dist/src/tools/web-search.test.js +229 -0
- package/dist/src/tools/web-search.test.js.map +1 -0
- package/dist/src/tools/write-file.js +12 -5
- package/dist/src/tools/write-file.js.map +1 -1
- package/dist/src/utils/browser.d.ts +13 -0
- package/dist/src/utils/browser.js +49 -0
- package/dist/src/utils/browser.js.map +1 -0
- package/dist/src/utils/errors.js +4 -4
- package/dist/src/utils/errors.js.map +1 -1
- package/dist/src/utils/memoryDiscovery.js +5 -1
- package/dist/src/utils/memoryDiscovery.js.map +1 -1
- package/dist/src/utils/quotaErrorDetection.js +0 -2
- package/dist/src/utils/quotaErrorDetection.js.map +1 -1
- package/dist/src/utils/retry.d.ts +6 -0
- package/dist/src/utils/retry.js +1 -1
- package/dist/src/utils/retry.js.map +1 -1
- package/dist/src/utils/user_account.js +6 -1
- package/dist/src/utils/user_account.js.map +1 -1
- package/dist/tsconfig.tsbuildinfo +1 -1
- package/package.json +6 -3
- package/dist/src/tools/web-fetch.test.js +0 -70
- package/dist/src/tools/web-fetch.test.js.map +0 -1
- package/dist/vybestack-llxprt-code-core-0.1.12.tgz +0 -0
- /package/dist/src/{tools/web-fetch.test.d.ts → mcp/oauth-provider.test.d.ts} +0 -0
@@ -0,0 +1,172 @@
# Accessing OpenAI Provider Information

This document explains how to access the OpenAI provider instance, conversation cache, and remote token information from within the React components.

## Overview

The OpenAI provider information can be accessed through the Config object that's passed to the App component. We've created utilities to make this access type-safe and convenient.

## Key Components

### 1. `getOpenAIProviderInfo()` Function

Located in `src/providers/openai/getOpenAIProviderInfo.ts`, this function extracts OpenAI provider information from the Config object:

```typescript
import { getOpenAIProviderInfo } from '../../providers/openai/getOpenAIProviderInfo.js';

// In your component
const openAIInfo = getOpenAIProviderInfo(config);

if (openAIInfo.provider) {
  console.log('OpenAI provider is active');
  console.log('Current model:', openAIInfo.currentModel);
  console.log('Using Responses API:', openAIInfo.isResponsesAPI);
}
```

### 2. `useOpenAIProviderInfo()` Hook

Located in `src/ui/hooks/useOpenAIProviderInfo.ts`, this React hook provides reactive access to provider information:

```typescript
import { useOpenAIProviderInfo } from './hooks/useOpenAIProviderInfo.js';

function MyComponent({ config }) {
  const openAIInfo = useOpenAIProviderInfo(config);

  // Access conversation cache
  const cachedMessages = openAIInfo.getCachedConversation(
    conversationId,
    parentId,
  );

  // Check if using Responses API
  if (openAIInfo.isResponsesAPI) {
    // Handle Responses API specific logic
  }
}
```

### 3. `OpenAIProviderContext`

Located in `src/ui/contexts/OpenAIProviderContext.tsx`, this context provides global access to OpenAI provider state including remote token tracking:

```typescript
// Wrap your app
<OpenAIProviderContextProvider config={config}>
  <App {...props} />
</OpenAIProviderContextProvider>

// Use in components
import { useOpenAIProviderContext } from '../contexts/OpenAIProviderContext.js';

function TokenDisplay() {
  const { remoteTokenStats, isResponsesAPI } = useOpenAIProviderContext();

  if (isResponsesAPI && remoteTokenStats.lastUpdated) {
    return (
      <div>
        Remote Tokens: {remoteTokenStats.totalTokenCount}
      </div>
    );
  }

  return null;
}
```

## Integration Example

Here's how to integrate OpenAI provider information access in the App component:

```typescript
// In App.tsx
import { useOpenAIProviderInfo } from './hooks/useOpenAIProviderInfo.js';

const App = ({ config, settings, startupWarnings = [] }: AppProps) => {
  // Access OpenAI provider info
  const openAIInfo = useOpenAIProviderInfo(config);

  // Use in your component logic
  useEffect(() => {
    if (openAIInfo.isOpenAIActive && openAIInfo.isResponsesAPI) {
      console.log(
        'OpenAI Responses API is active for model:',
        openAIInfo.currentModel,
      );
    }
  }, [openAIInfo]);

  // Access conversation cache when needed
  const handleConversationLookup = (
    conversationId: string,
    parentId: string,
  ) => {
    const cached = openAIInfo.getCachedConversation(conversationId, parentId);
    if (cached) {
      console.log('Found cached conversation:', cached);
    }
  };

  // ... rest of component
};
```

## Tracking Remote Tokens

To track remote tokens from the Responses API, you need to update the token stats when receiving responses:

```typescript
// In your API response handler
const handleResponsesAPIResponse = (response: any) => {
  // Extract token information from response
  const usage = response.usage;

  if (usage) {
    // Update remote token stats in context
    updateRemoteTokenStats({
      promptTokenCount: usage.prompt_tokens,
      candidatesTokenCount: usage.completion_tokens,
      totalTokenCount: usage.total_tokens,
    });
  }
};
```

## Type Safety

All interfaces are properly typed:

```typescript
interface OpenAIProviderInfo {
  provider: OpenAIProvider | null;
  conversationCache: ConversationCache | null;
  isResponsesAPI: boolean;
  currentModel: string | null;
  remoteTokenInfo: {
    promptTokenCount?: number;
    candidatesTokenCount?: number;
    totalTokenCount?: number;
  };
}
```

## Best Practices

1. **Check for null values**: Always check if the provider is active before accessing its properties
2. **Use the hook in components**: Prefer `useOpenAIProviderInfo` hook over direct function calls for reactive updates
3. **Context for global state**: Use `OpenAIProviderContext` when you need to share provider state across multiple components
4. **Handle provider switches**: The utilities automatically handle when users switch between providers

## Accessing Internal Properties

Since some properties of OpenAIProvider are private, we use type casting to access them:

```typescript
// Access private properties (use with caution)
const conversationCache = (openaiProvider as any).conversationCache;
const shouldUseResponses = (openaiProvider as any).shouldUseResponses;
```

This approach provides type safety where possible while still allowing access to necessary internal state.
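A note on the `as any` casts shown at the end of that document: the cast can be contained behind a narrow structural type instead of being repeated at each call site. This is only a sketch; the optional method names are assumptions that mirror the `OpenAIProviderLike` shape appearing in `getOpenAIProviderInfo.d.ts` later in this diff, not a confirmed internal API.

```typescript
// Hedged sketch: contain the cast behind a narrow structural type rather than
// sprinkling `as any`. Method names are assumptions mirroring OpenAIProviderLike.
type OpenAIProviderInternals = {
  getCurrentModel?: () => string;
  shouldUseResponses?: (model: string) => boolean;
};

declare const openaiProvider: unknown; // however the provider instance was obtained

const internals = openaiProvider as OpenAIProviderInternals;
const model = internals.getCurrentModel?.() ?? 'unknown';
const usesResponses = internals.shouldUseResponses?.(model) ?? false;
```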
@@ -0,0 +1,91 @@
# OpenAI Responses API Parameter Mapping

This document describes how parameters are mapped from our internal `ResponsesRequestParams` to the OpenAI Responses API format.

## Field Mapping Table

| Internal Field | Responses API Field | Type | Notes |
| --- | --- | --- | --- |
| `messages` | `messages` | `IMessage[]` | Direct mapping. Cannot be used with `prompt`. |
| `prompt` | `prompt` | `string` | Shortcut for simple queries. Cannot be used with `messages`. |
| `tools` | `tools` | `ITool[]` | Maximum 16 tools allowed. Total JSON size must be <32KB. |
| `stream` | `stream` | `boolean` | Enables streaming responses. |
| `conversationId` | `conversation_id` | `string` | For stateful conversations. Triggers message trimming warning. |
| `parentId` | `parent_id` | `string` | Parent message ID for conversation threading. |
| `tool_choice` | `tool_choice` | `string \| object` | Tool selection strategy. |
| `stateful` | `stateful` | `boolean` | Enables stateful conversation mode. |
| `model` | `model` | `string` | **Required**. Model identifier. |
| `temperature` | `temperature` | `number` | Sampling temperature (0-2). |
| `max_tokens` | `max_tokens` | `number` | Maximum tokens to generate. |
| `top_p` | `top_p` | `number` | Nucleus sampling parameter. |
| `frequency_penalty` | `frequency_penalty` | `number` | Frequency penalty (-2 to 2). |
| `presence_penalty` | `presence_penalty` | `number` | Presence penalty (-2 to 2). |
| `stop` | `stop` | `string \| string[]` | Stop sequences. |
| `n` | `n` | `number` | Number of completions to generate. |
| `logprobs` | `logprobs` | `boolean` | Include log probabilities. |
| `top_logprobs` | `top_logprobs` | `number` | Number of top log probabilities to return. |
| `response_format` | `response_format` | `object` | Response format specification. |
| `seed` | `seed` | `number` | Random seed for deterministic output. |
| `logit_bias` | `logit_bias` | `Record<string, number>` | Token bias adjustments. |
| `user` | `user` | `string` | End-user identifier. |

## Validation Rules

### 1. Message/Prompt Exclusivity

- Either `messages` or `prompt` must be provided, but not both
- Throws error if both are specified
- Throws error if neither is specified

### 2. Tool Constraints

- Maximum 16 tools allowed
- Total JSON size of tools must be less than 32KB
- Throws error if limits are exceeded

### 3. Stateful Mode Warnings

- When `conversationId` is provided with `messages`, a warning is logged
- Future implementations may trim messages to maintain context window

### 4. Required Fields

- `model` is always required for the Responses API

## Usage Examples

### Simple Prompt Request

```typescript
const request = buildResponsesRequest({
  model: 'gpt-4o',
  prompt: 'Hello, how are you?',
  stream: true,
});
```

### Conversation with Tools

```typescript
const request = buildResponsesRequest({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'What is the weather?' }],
  tools: [weatherTool],
  tool_choice: 'auto',
  temperature: 0.7,
});
```

### Stateful Conversation

```typescript
const request = buildResponsesRequest({
  model: 'gpt-4o',
  messages: [
    /* conversation history */
  ],
  conversationId: 'conv-123',
  parentId: 'msg-456',
  stateful: true,
});
```
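The validation rules in that document are described only in prose; as a rough sketch of what they amount to (type and constant names are assumed for illustration, and this is not the package's actual `buildResponsesRequest` implementation):

```typescript
// Sketch of the validation rules described above. Names are assumptions; the
// real checks live in buildResponsesRequest.ts and may differ in detail.
interface ResponsesRequestSketch {
  model: string;
  prompt?: string;
  messages?: Array<{ role: string; content: string }>;
  tools?: Array<Record<string, unknown>>;
}

const MAX_TOOLS = 16;
const MAX_TOOLS_JSON_BYTES = 32 * 1024;

function validateResponsesRequestSketch(params: ResponsesRequestSketch): void {
  // Rule 1: messages and prompt are mutually exclusive, and one is required.
  if (params.messages && params.prompt) {
    throw new Error('Provide either messages or prompt, not both');
  }
  if (!params.messages && !params.prompt) {
    throw new Error('Either messages or prompt is required');
  }
  // Rule 2: at most 16 tools, and their serialized size must stay under 32KB.
  if (params.tools) {
    if (params.tools.length > MAX_TOOLS) {
      throw new Error(`At most ${MAX_TOOLS} tools are allowed`);
    }
    const toolsJsonBytes = new TextEncoder().encode(
      JSON.stringify(params.tools),
    ).length;
    if (toolsJsonBytes >= MAX_TOOLS_JSON_BYTES) {
      throw new Error('Serialized tools must be smaller than 32KB');
    }
  }
  // Rule 3 (warning only): conversationId together with messages may trigger
  // message trimming in stateful mode; not treated as an error here.
  // Rule 4: model is always required (also enforced by the type above).
  if (!params.model) {
    throw new Error('model is required');
  }
}
```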
@@ -0,0 +1,96 @@
# OpenAI Responses API Tool Call Support

This document describes how tool calls are handled when using the OpenAI Responses API (e.g., with o3 models).

## Event Flow

The Responses API uses a different event structure than the Chat Completions API:

1. **`response.output_item.added`** - Signals the start of a new function call
   - Contains `item.id`, `item.call_id`, `item.name`
   - `item.type` must be `"function_call"`

2. **`response.function_call_arguments.delta`** - Streams function arguments
   - Contains `item_id` to match the function call
   - Contains `delta` with partial argument JSON

3. **`response.output_item.done`** - Signals function call completion
   - Contains the complete `item` with final `arguments`
   - Parser yields the complete tool call at this point

## Implementation Details

### State Management

The parser maintains a `Map` of in-progress function calls:

```typescript
const functionCalls = new Map<
  string,
  {
    id: string; // call_id or item.id
    name: string; // function name
    arguments: string; // accumulated JSON arguments
    output_index: number; // position in output array
  }
>();
```

### Event Processing

1. **On `response.output_item.added`**: Create new function call entry
2. **On `response.function_call_arguments.delta`**: Append to arguments
3. **On `response.output_item.done`**: Yield complete tool call and cleanup

### Output Format

Tool calls are yielded in the standard format expected by the rest of the system:

```typescript
{
  role: ContentGeneratorRole.ASSISTANT,
  content: '',
  tool_calls: [{
    id: string,
    type: 'function',
    function: {
      name: string,
      arguments: string // JSON string
    }
  }]
}
```

## Example Event Sequence

```
// 1. Function call starts
{"type":"response.output_item.added","item":{"id":"fc_123","type":"function_call","call_id":"call_abc","name":"get_weather"}}

// 2. Arguments stream in
{"type":"response.function_call_arguments.delta","item_id":"fc_123","delta":"{\"location\":"}
{"type":"response.function_call_arguments.delta","item_id":"fc_123","delta":"\"San Francisco, CA\"}"}

// 3. Function call completes
{"type":"response.output_item.done","item":{"id":"fc_123","type":"function_call","arguments":"{\"location\":\"San Francisco, CA\"}","call_id":"call_abc","name":"get_weather"}}
```

## Edge Cases Handled

1. **No call_id**: Falls back to `item.id` if `call_id` is not provided
2. **Empty arguments**: Yields empty string for functions with no parameters
3. **Concurrent calls**: Tracks multiple function calls by their unique IDs
4. **Interleaved content**: Text deltas and tool calls can be mixed in the stream
5. **Final arguments**: Uses `item.arguments` from the done event as the source of truth

## Testing

See `parseResponsesStream.responsesToolCalls.test.ts` for comprehensive test coverage including:

- Basic tool call parsing
- Streaming argument assembly
- Multiple concurrent tool calls
- Empty arguments
- Interleaved content and tool calls
- Usage data handling
- Edge cases
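As a rough illustration of the three-event flow and the edge cases listed in that document, the accumulation loop can be sketched as below. Event shapes are simplified and this is not the package's actual `parseResponsesStream.ts`, which also handles interleaved text deltas and usage data.

```typescript
// Hedged sketch of the added/delta/done accumulation described above.
type ResponsesEvent =
  | { type: 'response.output_item.added'; item: { id: string; type: string; call_id?: string; name?: string } }
  | { type: 'response.function_call_arguments.delta'; item_id: string; delta: string }
  | { type: 'response.output_item.done'; item: { id: string; type: string; call_id?: string; name?: string; arguments?: string } };

interface ToolCall {
  id: string;
  type: 'function';
  function: { name: string; arguments: string };
}

function* collectToolCalls(events: Iterable<ResponsesEvent>): Generator<ToolCall> {
  // In-progress calls keyed by item id, as in the Map shown earlier.
  const inProgress = new Map<string, { id: string; name: string; arguments: string }>();

  for (const event of events) {
    if (event.type === 'response.output_item.added' && event.item.type === 'function_call') {
      inProgress.set(event.item.id, {
        // Edge case 1: fall back to item.id when call_id is missing.
        id: event.item.call_id ?? event.item.id,
        name: event.item.name ?? '',
        arguments: '',
      });
    } else if (event.type === 'response.function_call_arguments.delta') {
      const call = inProgress.get(event.item_id);
      if (call) call.arguments += event.delta;
    } else if (event.type === 'response.output_item.done' && event.item.type === 'function_call') {
      const call = inProgress.get(event.item.id);
      if (call) {
        inProgress.delete(event.item.id);
        yield {
          id: call.id,
          type: 'function',
          // Edge case 5: prefer the final arguments from the done event.
          function: { name: call.name, arguments: event.item.arguments ?? call.arguments },
        };
      }
    }
  }
}
```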
@@ -0,0 +1,26 @@
import { IMessage } from '../IMessage.js';
import { ConversationCache } from './ConversationCache.js';
export declare const MODEL_CONTEXT_SIZE: Record<string, number>;
/**
 * Estimates the total tokens used including remote stored context
 * @param model The model being used
 * @param cache The conversation cache instance
 * @param conversationId The conversation ID
 * @param parentId The parent message ID
 * @param promptTokens The tokens in the current prompt
 * @returns Object with token usage information
 */
export declare function estimateRemoteTokens(model: string, cache: ConversationCache, conversationId: string | undefined, parentId: string | undefined, promptTokens: number): {
    totalTokens: number;
    remoteTokens: number;
    promptTokens: number;
    maxTokens: number;
    contextUsedPercent: number;
    tokensRemaining: number;
};
/**
 * Estimates tokens for a message array (rough approximation)
 * @param messages Array of messages
 * @returns Estimated token count
 */
export declare function estimateMessagesTokens(messages: IMessage[]): number;
@@ -0,0 +1,75 @@
// Model context size configuration
export const MODEL_CONTEXT_SIZE = {
    'gpt-4.1': 1_000_000,
    o3: 200_000,
    'o3-pro': 200_000,
    'o3-mini': 200_000,
    o1: 200_000,
    'o1-mini': 200_000,
    'gpt-4o': 128_000,
    'gpt-4o-mini': 128_000,
    'gpt-4o-realtime': 128_000,
    'gpt-4-turbo': 128_000,
    'gpt-4-turbo-preview': 128_000,
    'gpt-3.5-turbo': 16_385,
    // Default fallback
    default: 128_000,
};
/**
 * Estimates the total tokens used including remote stored context
 * @param model The model being used
 * @param cache The conversation cache instance
 * @param conversationId The conversation ID
 * @param parentId The parent message ID
 * @param promptTokens The tokens in the current prompt
 * @returns Object with token usage information
 */
export function estimateRemoteTokens(model, cache, conversationId, parentId, promptTokens) {
    // Find the context size by checking if model starts with known prefixes
    let maxTokens = MODEL_CONTEXT_SIZE.default;
    for (const [knownModel, contextSize] of Object.entries(MODEL_CONTEXT_SIZE)) {
        if (knownModel !== 'default' && model.startsWith(knownModel)) {
            maxTokens = contextSize;
            break;
        }
    }
    // Get accumulated tokens from cache
    const remoteTokens = conversationId && parentId
        ? cache.getAccumulatedTokens(conversationId, parentId)
        : 0;
    const totalTokens = remoteTokens + promptTokens;
    const tokensRemaining = Math.max(0, maxTokens - totalTokens);
    const contextUsedPercent = Math.min(100, (totalTokens / maxTokens) * 100);
    return {
        totalTokens,
        remoteTokens,
        promptTokens,
        maxTokens,
        contextUsedPercent,
        tokensRemaining,
    };
}
/**
 * Estimates tokens for a message array (rough approximation)
 * @param messages Array of messages
 * @returns Estimated token count
 */
export function estimateMessagesTokens(messages) {
    // Rough estimation: ~4 characters per token
    let totalChars = 0;
    for (const message of messages) {
        // Add role tokens (usually 1-2 tokens)
        totalChars += 8;
        // Add content
        if (message.content) {
            totalChars += message.content.length;
        }
        // Add tool calls overhead
        if (message.tool_calls) {
            totalChars += JSON.stringify(message.tool_calls).length;
        }
    }
    // Rough approximation: 4 characters per token
    return Math.ceil(totalChars / 4);
}
//# sourceMappingURL=estimateRemoteTokens.js.map
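A short usage sketch for the two helpers above; the numbers mirror the test expectations further down in this diff, and the `cache.set(conversationId, parentId, messages, tokens)` call shape is taken from those tests:

```typescript
import { ConversationCache } from './ConversationCache.js';
import { estimateRemoteTokens } from './estimateRemoteTokens.js';

// 50,000 tokens already held remotely for this conversation, plus a
// 2,000-token prompt, against gpt-4o's 128,000-token window.
const cache = new ConversationCache();
cache.set('conv-123', 'msg-456', [], 50_000);

const usage = estimateRemoteTokens('gpt-4o', cache, 'conv-123', 'msg-456', 2_000);
console.log(usage.totalTokens); // 52000
console.log(usage.tokensRemaining); // 76000
console.log(usage.contextUsedPercent); // 52000 / 128000 * 100 ≈ 40.6
```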
@@ -0,0 +1 @@
{"version":3,"file":"estimateRemoteTokens.js","sourceRoot":"","sources":["../../../../src/providers/openai/estimateRemoteTokens.ts"],"names":[],"mappings":"AAGA,mCAAmC;AACnC,MAAM,CAAC,MAAM,kBAAkB,GAA2B;IACxD,SAAS,EAAE,SAAS;IACpB,EAAE,EAAE,OAAO;IACX,QAAQ,EAAE,OAAO;IACjB,SAAS,EAAE,OAAO;IAClB,EAAE,EAAE,OAAO;IACX,SAAS,EAAE,OAAO;IAClB,QAAQ,EAAE,OAAO;IACjB,aAAa,EAAE,OAAO;IACtB,iBAAiB,EAAE,OAAO;IAC1B,aAAa,EAAE,OAAO;IACtB,qBAAqB,EAAE,OAAO;IAC9B,eAAe,EAAE,MAAM;IACvB,mBAAmB;IACnB,OAAO,EAAE,OAAO;CACjB,CAAC;AAEF;;;;;;;;GAQG;AACH,MAAM,UAAU,oBAAoB,CAClC,KAAa,EACb,KAAwB,EACxB,cAAkC,EAClC,QAA4B,EAC5B,YAAoB;IASpB,wEAAwE;IACxE,IAAI,SAAS,GAAG,kBAAkB,CAAC,OAAO,CAAC;IAC3C,KAAK,MAAM,CAAC,UAAU,EAAE,WAAW,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,kBAAkB,CAAC,EAAE,CAAC;QAC3E,IAAI,UAAU,KAAK,SAAS,IAAI,KAAK,CAAC,UAAU,CAAC,UAAU,CAAC,EAAE,CAAC;YAC7D,SAAS,GAAG,WAAW,CAAC;YACxB,MAAM;QACR,CAAC;IACH,CAAC;IAED,oCAAoC;IACpC,MAAM,YAAY,GAChB,cAAc,IAAI,QAAQ;QACxB,CAAC,CAAC,KAAK,CAAC,oBAAoB,CAAC,cAAc,EAAE,QAAQ,CAAC;QACtD,CAAC,CAAC,CAAC,CAAC;IAER,MAAM,WAAW,GAAG,YAAY,GAAG,YAAY,CAAC;IAChD,MAAM,eAAe,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,SAAS,GAAG,WAAW,CAAC,CAAC;IAC7D,MAAM,kBAAkB,GAAG,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,CAAC,WAAW,GAAG,SAAS,CAAC,GAAG,GAAG,CAAC,CAAC;IAE1E,OAAO;QACL,WAAW;QACX,YAAY;QACZ,YAAY;QACZ,SAAS;QACT,kBAAkB;QAClB,eAAe;KAChB,CAAC;AACJ,CAAC;AAED;;;;GAIG;AACH,MAAM,UAAU,sBAAsB,CAAC,QAAoB;IACzD,4CAA4C;IAC5C,IAAI,UAAU,GAAG,CAAC,CAAC;IAEnB,KAAK,MAAM,OAAO,IAAI,QAAQ,EAAE,CAAC;QAC/B,uCAAuC;QACvC,UAAU,IAAI,CAAC,CAAC;QAEhB,cAAc;QACd,IAAI,OAAO,CAAC,OAAO,EAAE,CAAC;YACpB,UAAU,IAAI,OAAO,CAAC,OAAO,CAAC,MAAM,CAAC;QACvC,CAAC;QAED,0BAA0B;QAC1B,IAAI,OAAO,CAAC,UAAU,EAAE,CAAC;YACvB,UAAU,IAAI,IAAI,CAAC,SAAS,CAAC,OAAO,CAAC,UAAU,CAAC,CAAC,MAAM,CAAC;QAC1D,CAAC;IACH,CAAC;IAED,8CAA8C;IAC9C,OAAO,IAAI,CAAC,IAAI,CAAC,UAAU,GAAG,CAAC,CAAC,CAAC;AACnC,CAAC"}
@@ -0,0 +1 @@
export {};
@@ -0,0 +1,125 @@
import { describe, it, expect, beforeEach } from 'vitest';
import { estimateRemoteTokens, estimateMessagesTokens, MODEL_CONTEXT_SIZE, } from './estimateRemoteTokens.js';
import { ConversationCache } from './ConversationCache.js';
import { ContentGeneratorRole } from '../ContentGeneratorRole.js';
describe('estimateRemoteTokens', () => {
    let cache;
    beforeEach(() => {
        cache = new ConversationCache();
    });
    it('should calculate context usage with no remote tokens', () => {
        const result = estimateRemoteTokens('gpt-4o', cache, undefined, undefined, 1000);
        expect(result).toEqual({
            totalTokens: 1000,
            remoteTokens: 0,
            promptTokens: 1000,
            maxTokens: 128000,
            contextUsedPercent: (1000 / 128000) * 100,
            tokensRemaining: 127000,
        });
    });
    it('should include remote tokens in calculation', () => {
        // Set up cache with accumulated tokens
        cache.set('conv1', 'parent1', [], 50000);
        const result = estimateRemoteTokens('gpt-4o', cache, 'conv1', 'parent1', 2000);
        expect(result).toEqual({
            totalTokens: 52000,
            remoteTokens: 50000,
            promptTokens: 2000,
            maxTokens: 128000,
            contextUsedPercent: (52000 / 128000) * 100,
            tokensRemaining: 76000,
        });
    });
    it('should handle context overflow', () => {
        // Set up cache with large accumulated tokens
        cache.set('conv1', 'parent1', [], 125000);
        const result = estimateRemoteTokens('gpt-4o', cache, 'conv1', 'parent1', 5000);
        expect(result).toEqual({
            totalTokens: 130000,
            remoteTokens: 125000,
            promptTokens: 5000,
            maxTokens: 128000,
            contextUsedPercent: 100, // Capped at 100%
            tokensRemaining: 0, // No tokens remaining
        });
    });
    it('should use correct model context sizes', () => {
        // Test GPT-3.5 with smaller context
        const result = estimateRemoteTokens('gpt-3.5-turbo', cache, undefined, undefined, 10000);
        expect(result.maxTokens).toBe(16385);
        expect(result.tokensRemaining).toBe(6385);
        // Test o3 model
        const o3Result = estimateRemoteTokens('o3', cache, undefined, undefined, 10000);
        expect(o3Result.maxTokens).toBe(200000);
    });
    it('should use default context size for unknown models', () => {
        const result = estimateRemoteTokens('unknown-model', cache, undefined, undefined, 1000);
        expect(result.maxTokens).toBe(MODEL_CONTEXT_SIZE.default);
    });
});
describe('estimateMessagesTokens', () => {
    it('should estimate tokens for simple messages', () => {
        const messages = [
            { role: ContentGeneratorRole.USER, content: 'Hello, how are you?' },
            {
                role: ContentGeneratorRole.ASSISTANT,
                content: 'I am doing well, thank you!',
            },
        ];
        const tokens = estimateMessagesTokens(messages);
        // Rough calculation: ~4 chars per token + role overhead
        // "Hello, how are you?" = 19 chars
        // "I am doing well, thank you!" = 28 chars
        // Plus role overhead (8 chars each) = 16
        // Total: 19 + 28 + 16 = 63 chars / 4 = ~16 tokens
        expect(tokens).toBeGreaterThan(10);
        expect(tokens).toBeLessThan(25);
    });
    it('should handle empty messages', () => {
        const messages = [];
        const tokens = estimateMessagesTokens(messages);
        expect(tokens).toBe(0);
    });
    it('should include tool calls in estimation', () => {
        const messages = [
            {
                role: ContentGeneratorRole.ASSISTANT,
                content: 'Let me search for that.',
                tool_calls: [
                    {
                        id: 'call_123',
                        type: 'function',
                        function: {
                            name: 'search',
                            arguments: '{"query": "weather in San Francisco"}',
                        },
                    },
                ],
            },
        ];
        const tokens = estimateMessagesTokens(messages);
        // Should include content + tool call JSON
        expect(tokens).toBeGreaterThan(20);
    });
    it('should handle messages with no content', () => {
        const messages = [
            { role: ContentGeneratorRole.USER, content: '' }, // No content
            { role: ContentGeneratorRole.ASSISTANT, content: '' }, // Empty content
        ];
        const tokens = estimateMessagesTokens(messages);
        // Should still count role overhead
        expect(tokens).toBeGreaterThan(0);
    });
    it('should handle very long messages', () => {
        const longContent = 'a'.repeat(10000); // 10k characters
        const messages = [
            { role: ContentGeneratorRole.USER, content: longContent },
        ];
        const tokens = estimateMessagesTokens(messages);
        // ~10k chars / 4 = ~2500 tokens
        expect(tokens).toBeGreaterThan(2000);
        expect(tokens).toBeLessThan(3000);
    });
});
//# sourceMappingURL=estimateRemoteTokens.test.js.map
@@ -0,0 +1 @@
{"version":3,"file":"estimateRemoteTokens.test.js","sourceRoot":"","sources":["../../../../src/providers/openai/estimateRemoteTokens.test.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,EAAE,EAAE,MAAM,EAAE,UAAU,EAAE,MAAM,QAAQ,CAAC;AAC1D,OAAO,EACL,oBAAoB,EACpB,sBAAsB,EACtB,kBAAkB,GACnB,MAAM,2BAA2B,CAAC;AACnC,OAAO,EAAE,iBAAiB,EAAE,MAAM,wBAAwB,CAAC;AAE3D,OAAO,EAAE,oBAAoB,EAAE,MAAM,4BAA4B,CAAC;AAClE,QAAQ,CAAC,sBAAsB,EAAE,GAAG,EAAE;IACpC,IAAI,KAAwB,CAAC;IAE7B,UAAU,CAAC,GAAG,EAAE;QACd,KAAK,GAAG,IAAI,iBAAiB,EAAE,CAAC;IAClC,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,sDAAsD,EAAE,GAAG,EAAE;QAC9D,MAAM,MAAM,GAAG,oBAAoB,CACjC,QAAQ,EACR,KAAK,EACL,SAAS,EACT,SAAS,EACT,IAAI,CACL,CAAC;QAEF,MAAM,CAAC,MAAM,CAAC,CAAC,OAAO,CAAC;YACrB,WAAW,EAAE,IAAI;YACjB,YAAY,EAAE,CAAC;YACf,YAAY,EAAE,IAAI;YAClB,SAAS,EAAE,MAAM;YACjB,kBAAkB,EAAE,CAAC,IAAI,GAAG,MAAM,CAAC,GAAG,GAAG;YACzC,eAAe,EAAE,MAAM;SACxB,CAAC,CAAC;IACL,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,6CAA6C,EAAE,GAAG,EAAE;QACrD,uCAAuC;QACvC,KAAK,CAAC,GAAG,CAAC,OAAO,EAAE,SAAS,EAAE,EAAE,EAAE,KAAK,CAAC,CAAC;QAEzC,MAAM,MAAM,GAAG,oBAAoB,CACjC,QAAQ,EACR,KAAK,EACL,OAAO,EACP,SAAS,EACT,IAAI,CACL,CAAC;QAEF,MAAM,CAAC,MAAM,CAAC,CAAC,OAAO,CAAC;YACrB,WAAW,EAAE,KAAK;YAClB,YAAY,EAAE,KAAK;YACnB,YAAY,EAAE,IAAI;YAClB,SAAS,EAAE,MAAM;YACjB,kBAAkB,EAAE,CAAC,KAAK,GAAG,MAAM,CAAC,GAAG,GAAG;YAC1C,eAAe,EAAE,KAAK;SACvB,CAAC,CAAC;IACL,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,gCAAgC,EAAE,GAAG,EAAE;QACxC,6CAA6C;QAC7C,KAAK,CAAC,GAAG,CAAC,OAAO,EAAE,SAAS,EAAE,EAAE,EAAE,MAAM,CAAC,CAAC;QAE1C,MAAM,MAAM,GAAG,oBAAoB,CACjC,QAAQ,EACR,KAAK,EACL,OAAO,EACP,SAAS,EACT,IAAI,CACL,CAAC;QAEF,MAAM,CAAC,MAAM,CAAC,CAAC,OAAO,CAAC;YACrB,WAAW,EAAE,MAAM;YACnB,YAAY,EAAE,MAAM;YACpB,YAAY,EAAE,IAAI;YAClB,SAAS,EAAE,MAAM;YACjB,kBAAkB,EAAE,GAAG,EAAE,iBAAiB;YAC1C,eAAe,EAAE,CAAC,EAAE,sBAAsB;SAC3C,CAAC,CAAC;IACL,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,wCAAwC,EAAE,GAAG,EAAE;QAChD,oCAAoC;QACpC,MAAM,MAAM,GAAG,oBAAoB,CACjC,eAAe,EACf,KAAK,EACL,SAAS,EACT,SAAS,EACT,KAAK,CACN,CAAC;QAEF,MAAM,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;QACrC,MAAM,CAAC,MAAM,CAAC,eAAe,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;QAE1C,gBAAgB;QAChB,MAAM,QAAQ,GAAG,oBAAoB,CACnC,IAAI,EACJ,KAAK,EACL,SAAS,EACT,SAAS,EACT,KAAK,CACN,CAAC;QAEF,MAAM,CAAC,QAAQ,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;IAC1C,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,oDAAoD,EAAE,GAAG,EAAE;QAC5D,MAAM,MAAM,GAAG,oBAAoB,CACjC,eAAe,EACf,KAAK,EACL,SAAS,EACT,SAAS,EACT,IAAI,CACL,CAAC;QAEF,MAAM,CAAC,MAAM,CAAC,SAAS,CAAC,CAAC,IAAI,CAAC,kBAAkB,CAAC,OAAO,CAAC,CAAC;IAC5D,CAAC,CAAC,CAAC;AACL,CAAC,CAAC,CAAC;AAEH,QAAQ,CAAC,wBAAwB,EAAE,GAAG,EAAE;IACtC,EAAE,CAAC,4CAA4C,EAAE,GAAG,EAAE;QACpD,MAAM,QAAQ,GAAe;YAC3B,EAAE,IAAI,EAAE,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,qBAAqB,EAAE;YACnE;gBACE,IAAI,EAAE,oBAAoB,CAAC,SAAS;gBACpC,OAAO,EAAE,6BAA6B;aACvC;SACF,CAAC;QAEF,MAAM,MAAM,GAAG,sBAAsB,CAAC,QAAQ,CAAC,CAAC;QAEhD,wDAAwD;QACxD,mCAAmC;QACnC,2CAA2C;QAC3C,yCAAyC;QACzC,kDAAkD;QAClD,MAAM,CAAC,MAAM,CAAC,CAAC,eAAe,CAAC,EAAE,CAAC,CAAC;QACnC,MAAM,CAAC,MAAM,CAAC,CAAC,YAAY,CAAC,EAAE,CAAC,CAAC;IAClC,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,8BAA8B,EAAE,GAAG,EAAE;QACtC,MAAM,QAAQ,GAAe,EAAE,CAAC;QAChC,MAAM,MAAM,GAAG,sBAAsB,CAAC,QAAQ,CAAC,CAAC;QAChD,MAAM,CAAC,MAAM,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;IACzB,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,yCAAyC,EAAE,GAAG,EAAE;QACjD,MAAM,QAAQ,GAAe;YAC3B;gBACE,IAAI,EAAE,oBAAoB,CAAC,SAAS;gBACpC,OAAO,EAAE,yBAAyB;gBAClC,UAAU,EAAE;oBACV;wBACE,EAAE,EAAE,UAAU;wBACd,IAAI,EAAE,UAAU;wBAChB,QAAQ,EAAE;4BACR,IAAI,EAAE,QAAQ;4BACd,SAAS,EAAE,uCAAuC;yBACnD;qBACF;iBACF;aACF;SACF,CAAC;QAEF,MAAM,MAAM,GAAG,sBAAsB,CAAC,QAAQ,CAAC,CAAC;QAEhD,0CAA0C;QAC1C,MAAM,CAAC,MAAM,CAAC,CAAC,eAAe,CAAC,EAAE,CAAC,
CAAC;IACrC,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,wCAAwC,EAAE,GAAG,EAAE;QAChD,MAAM,QAAQ,GAAe;YAC3B,EAAE,IAAI,EAAE,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,aAAa;YAC/D,EAAE,IAAI,EAAE,oBAAoB,CAAC,SAAS,EAAE,OAAO,EAAE,EAAE,EAAE,EAAE,gBAAgB;SACxE,CAAC;QAEF,MAAM,MAAM,GAAG,sBAAsB,CAAC,QAAQ,CAAC,CAAC;QAEhD,mCAAmC;QACnC,MAAM,CAAC,MAAM,CAAC,CAAC,eAAe,CAAC,CAAC,CAAC,CAAC;IACpC,CAAC,CAAC,CAAC;IAEH,EAAE,CAAC,kCAAkC,EAAE,GAAG,EAAE;QAC1C,MAAM,WAAW,GAAG,GAAG,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,iBAAiB;QACxD,MAAM,QAAQ,GAAe;YAC3B,EAAE,IAAI,EAAE,oBAAoB,CAAC,IAAI,EAAE,OAAO,EAAE,WAAW,EAAE;SAC1D,CAAC;QAEF,MAAM,MAAM,GAAG,sBAAsB,CAAC,QAAQ,CAAC,CAAC;QAEhD,gCAAgC;QAChC,MAAM,CAAC,MAAM,CAAC,CAAC,eAAe,CAAC,IAAI,CAAC,CAAC;QACrC,MAAM,CAAC,MAAM,CAAC,CAAC,YAAY,CAAC,IAAI,CAAC,CAAC;IACpC,CAAC,CAAC,CAAC;AACL,CAAC,CAAC,CAAC"}
@@ -0,0 +1,46 @@
/**
 * @license
 * Copyright 2025 Vybestack LLC
 * SPDX-License-Identifier: Apache-2.0
 */
import { ProviderManager } from '../ProviderManager.js';
import { ConversationCache } from './ConversationCache.js';
type OpenAIProviderLike = {
    name: string;
    getCurrentModel?: () => string;
    getConversationCache?: () => ConversationCache;
    shouldUseResponses?: (model: string) => boolean;
    [key: string]: unknown;
};
export interface OpenAIProviderInfo {
    provider: OpenAIProviderLike | null;
    conversationCache: ConversationCache | null;
    isResponsesAPI: boolean;
    currentModel: string | null;
    remoteTokenInfo: {
        promptTokenCount?: number;
        candidatesTokenCount?: number;
        totalTokenCount?: number;
    };
}
/**
 * Retrieves OpenAI provider information from the current ProviderManager instance
 * @param providerManager The ProviderManager instance
 * @returns OpenAI provider info if available, null values otherwise
 */
export declare function getOpenAIProviderInfo(providerManager: ProviderManager | null | undefined): OpenAIProviderInfo;
export {};
/**
 * Example usage:
 *
 * const openAIInfo = getOpenAIProviderInfo(providerManager);
 * if (openAIInfo.provider && openAIInfo.conversationCache) {
 *   // Access conversation cache
 *   const cachedMessages = openAIInfo.conversationCache.get(conversationId, parentId);
 *
 *   // Check if using Responses API
 *   if (openAIInfo.isResponsesAPI) {
 *     console.log('Using OpenAI Responses API');
 *   }
 * }
 */