@vybestack/llxprt-code-core 0.7.0-nightly.251211.134f1920b → 0.7.0-nightly.251211.f9e1b74e4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.ts +2 -1
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/src/adapters/IStreamAdapter.d.ts +2 -2
- package/dist/src/auth/anthropic-device-flow.d.ts +1 -1
- package/dist/src/auth/precedence.d.ts +1 -1
- package/dist/src/auth/qwen-device-flow.d.ts +1 -1
- package/dist/src/auth/token-store.d.ts +1 -1
- package/dist/src/auth/token-store.js.map +1 -1
- package/dist/src/code_assist/codeAssist.d.ts +1 -1
- package/dist/src/code_assist/codeAssist.js.map +1 -1
- package/dist/src/code_assist/converter.d.ts +1 -1
- package/dist/src/code_assist/server.d.ts +3 -3
- package/dist/src/config/config.d.ts +3 -3
- package/dist/src/config/config.js +1 -1
- package/dist/src/config/config.js.map +1 -1
- package/dist/src/config/profileManager.d.ts +1 -1
- package/dist/src/config/subagentManager.d.ts +1 -1
- package/dist/src/confirmation-bus/message-bus.d.ts +1 -1
- package/dist/src/core/client.d.ts +3 -3
- package/dist/src/core/client.js.map +1 -1
- package/dist/src/core/contentGenerator.d.ts +1 -1
- package/dist/src/core/coreToolScheduler.d.ts +4 -3
- package/dist/src/core/coreToolScheduler.js +28 -0
- package/dist/src/core/coreToolScheduler.js.map +1 -1
- package/dist/src/core/geminiChat.d.ts +2 -2
- package/dist/src/core/googleGenAIWrapper.d.ts +2 -2
- package/dist/src/core/logger.d.ts +1 -1
- package/dist/src/core/loggingContentGenerator.d.ts +2 -2
- package/dist/src/core/nonInteractiveToolExecutor.d.ts +1 -1
- package/dist/src/core/nonInteractiveToolExecutor.js.map +1 -1
- package/dist/src/core/subagent.d.ts +1 -1
- package/dist/src/core/subagent.js.map +1 -1
- package/dist/src/core/turn.d.ts +2 -2
- package/dist/src/debug/ConfigurationManager.d.ts +1 -1
- package/dist/src/ide/ide-client.d.ts +1 -1
- package/dist/src/ide/process-utils.js +45 -25
- package/dist/src/ide/process-utils.js.map +1 -1
- package/dist/src/index.d.ts +4 -2
- package/dist/src/index.js +2 -2
- package/dist/src/index.js.map +1 -1
- package/dist/src/mcp/file-token-store.d.ts +1 -1
- package/dist/src/mcp/google-auth-provider.d.ts +2 -2
- package/dist/src/mcp/oauth-provider.d.ts +1 -1
- package/dist/src/mcp/oauth-provider.js +1 -1
- package/dist/src/mcp/oauth-provider.js.map +1 -1
- package/dist/src/mcp/oauth-utils.d.ts +1 -1
- package/dist/src/prompt-config/TemplateEngine.d.ts +1 -1
- package/dist/src/prompt-config/prompt-cache.d.ts +1 -1
- package/dist/src/prompt-config/prompt-resolver.d.ts +1 -1
- package/dist/src/prompts/mcp-prompts.d.ts +1 -1
- package/dist/src/prompts/prompt-registry.d.ts +1 -1
- package/dist/src/providers/BaseProvider.d.ts +5 -5
- package/dist/src/providers/IProvider.d.ts +3 -3
- package/dist/src/providers/IProviderManager.d.ts +2 -2
- package/dist/src/providers/LoggingProviderWrapper.d.ts +2 -2
- package/dist/src/providers/LoggingProviderWrapper.js.map +1 -1
- package/dist/src/providers/ProviderContentGenerator.d.ts +2 -2
- package/dist/src/providers/ProviderManager.d.ts +3 -3
- package/dist/src/providers/anthropic/AnthropicProvider.d.ts +5 -5
- package/dist/src/providers/anthropic/AnthropicProvider.js +1 -1
- package/dist/src/providers/anthropic/AnthropicProvider.js.map +1 -1
- package/dist/src/providers/gemini/GeminiProvider.d.ts +4 -4
- package/dist/src/providers/openai/ConversationCache.d.ts +1 -1
- package/dist/src/providers/openai/IChatGenerateParams.d.ts +1 -1
- package/dist/src/providers/openai/OpenAIProvider.d.ts +14 -6
- package/dist/src/providers/openai/OpenAIProvider.js +149 -8
- package/dist/src/providers/openai/OpenAIProvider.js.map +1 -1
- package/dist/src/providers/openai/ToolCallPipeline.d.ts +2 -2
- package/dist/src/providers/openai/buildResponsesRequest.d.ts +3 -3
- package/dist/src/providers/openai/estimateRemoteTokens.d.ts +1 -1
- package/dist/src/providers/openai/parseResponsesStream.d.ts +1 -1
- package/dist/src/providers/openai/syntheticToolResponses.d.ts +1 -1
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.d.ts +4 -4
- package/dist/src/providers/openai-vercel/OpenAIVercelProvider.d.ts +6 -6
- package/dist/src/providers/test-utils/providerTestConfig.d.ts +1 -1
- package/dist/src/providers/tokenizers/AnthropicTokenizer.d.ts +1 -1
- package/dist/src/providers/tokenizers/OpenAITokenizer.d.ts +1 -1
- package/dist/src/providers/tokenizers/OpenAITokenizer.js.map +1 -1
- package/dist/src/services/fileDiscoveryService.js +1 -1
- package/dist/src/services/fileDiscoveryService.js.map +1 -1
- package/dist/src/services/gitService.js.map +1 -1
- package/dist/src/services/history/ContentConverters.d.ts +1 -1
- package/dist/src/services/history/HistoryService.d.ts +2 -2
- package/dist/src/services/loopDetectionService.d.ts +1 -1
- package/dist/src/services/loopDetectionService.js.map +1 -1
- package/dist/src/services/shellExecutionService.js.map +1 -1
- package/dist/src/services/todo-reminder-service.d.ts +1 -1
- package/dist/src/services/tool-call-tracker-service.d.ts +1 -1
- package/dist/src/settings/SettingsService.d.ts +1 -1
- package/dist/src/telemetry/file-exporters.d.ts +4 -4
- package/dist/src/telemetry/file-exporters.js.map +1 -1
- package/dist/src/telemetry/index.d.ts +2 -1
- package/dist/src/telemetry/index.js.map +1 -1
- package/dist/src/telemetry/loggers.js +1 -1
- package/dist/src/telemetry/loggers.js.map +1 -1
- package/dist/src/telemetry/loggers.test.circular.js.map +1 -1
- package/dist/src/telemetry/metrics.d.ts +2 -2
- package/dist/src/telemetry/types.d.ts +1 -1
- package/dist/src/telemetry/types.js.map +1 -1
- package/dist/src/test-utils/config.js.map +1 -1
- package/dist/src/test-utils/tools.d.ts +2 -2
- package/dist/src/todo/todoFormatter.d.ts +1 -1
- package/dist/src/tools/IToolFormatter.d.ts +2 -2
- package/dist/src/tools/ToolFormatter.d.ts +3 -3
- package/dist/src/tools/codesearch.d.ts +1 -1
- package/dist/src/tools/delete_line_range.d.ts +1 -1
- package/dist/src/tools/diffOptions.d.ts +1 -1
- package/dist/src/tools/direct-web-fetch.d.ts +1 -1
- package/dist/src/tools/direct-web-fetch.js.map +1 -1
- package/dist/src/tools/edit.d.ts +2 -2
- package/dist/src/tools/edit.js.map +1 -1
- package/dist/src/tools/exa-web-search.d.ts +1 -1
- package/dist/src/tools/google-web-fetch.d.ts +1 -1
- package/dist/src/tools/google-web-search-invocation.d.ts +2 -2
- package/dist/src/tools/google-web-search-invocation.js.map +1 -1
- package/dist/src/tools/google-web-search.d.ts +3 -3
- package/dist/src/tools/google-web-search.js.map +1 -1
- package/dist/src/tools/grep.d.ts +1 -1
- package/dist/src/tools/insert_at_line.d.ts +1 -1
- package/dist/src/tools/list-subagents.d.ts +1 -1
- package/dist/src/tools/ls.d.ts +1 -1
- package/dist/src/tools/mcp-tool.d.ts +2 -2
- package/dist/src/tools/memoryTool.d.ts +12 -4
- package/dist/src/tools/memoryTool.js +81 -29
- package/dist/src/tools/memoryTool.js.map +1 -1
- package/dist/src/tools/modifiable-tool.d.ts +2 -2
- package/dist/src/tools/modifiable-tool.js.map +1 -1
- package/dist/src/tools/read-file.d.ts +1 -1
- package/dist/src/tools/read-many-files.d.ts +1 -1
- package/dist/src/tools/read_line_range.d.ts +1 -1
- package/dist/src/tools/ripGrep.d.ts +1 -1
- package/dist/src/tools/shell.d.ts +1 -1
- package/dist/src/tools/task.d.ts +1 -1
- package/dist/src/tools/todo-events.d.ts +1 -1
- package/dist/src/tools/todo-pause.d.ts +1 -1
- package/dist/src/tools/todo-pause.js.map +1 -1
- package/dist/src/tools/todo-read.d.ts +1 -1
- package/dist/src/tools/todo-read.js.map +1 -1
- package/dist/src/tools/todo-store.d.ts +1 -1
- package/dist/src/tools/todo-store.js.map +1 -1
- package/dist/src/tools/todo-write.d.ts +2 -2
- package/dist/src/tools/todo-write.js.map +1 -1
- package/dist/src/tools/tool-registry.d.ts +3 -3
- package/dist/src/tools/tools.d.ts +6 -5
- package/dist/src/tools/tools.js +1 -1
- package/dist/src/tools/tools.js.map +1 -1
- package/dist/src/tools/write-file.d.ts +2 -2
- package/dist/src/tools/write-file.js.map +1 -1
- package/dist/src/utils/environmentContext.d.ts +1 -1
- package/dist/src/utils/errorReporting.d.ts +1 -1
- package/dist/src/utils/fileUtils.d.ts +2 -2
- package/dist/src/utils/filesearch/fileSearch.js.map +1 -1
- package/dist/src/utils/generateContentResponseUtilities.d.ts +1 -1
- package/dist/src/utils/generateContentResponseUtilities.js.map +1 -1
- package/dist/src/utils/messageInspectors.d.ts +1 -1
- package/dist/src/utils/partUtils.d.ts +1 -1
- package/dist/src/utils/quotaErrorDetection.d.ts +1 -1
- package/dist/src/utils/summarizer.d.ts +1 -1
- package/package.json +1 -1
@@ -1557,6 +1557,8 @@ export class OpenAIProvider extends BaseProvider {
         let streamingUsage = null;
         // Track total chunks for debugging empty responses
         let totalChunksReceived = 0;
+        // Track finish_reason for detecting empty responses (issue #584)
+        let lastFinishReason = null;
         try {
             // Handle streaming response
             for await (const chunk of response) {
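The two added lines only record why the stream ended; the decision logic runs after the loop. A minimal sketch of that bookkeeping, assuming an OpenAI-style chunk stream (the helper name and types below are illustrative, not part of the package):

// Sketch only: capture the last finish_reason seen while draining a stream,
// so post-loop code can tell whether the model stopped with "stop", "tool_calls", etc.
async function trackFinishReason(
  stream: AsyncIterable<{ choices?: Array<{ finish_reason?: string | null }> }>,
): Promise<string | null> {
  let lastFinishReason: string | null = null;
  for await (const chunk of stream) {
    const reason = chunk.choices?.[0]?.finish_reason;
    if (reason) {
      // Remember why the stream ended; inspected after the loop (issue #584).
      lastFinishReason = reason;
    }
  }
  return lastFinishReason;
}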
@@ -1635,6 +1637,7 @@ export class OpenAIProvider extends BaseProvider {
                 }
                 // Check for finish_reason to detect proper stream ending
                 if (choice.finish_reason) {
+                    lastFinishReason = choice.finish_reason;
                     logger.debug(() => `[Streaming] Stream finished with reason: ${choice.finish_reason}`, {
                         model,
                         finishReason: choice.finish_reason,
@@ -2086,6 +2089,28 @@ export class OpenAIProvider extends BaseProvider {
                 },
             };
         }
+        // Detect and handle empty streaming responses after tool calls (issue #584)
+        // Some models (like gpt-oss-120b on OpenRouter) return finish_reason=stop with tools but no text
+        const hasToolsButNoText = lastFinishReason === 'stop' &&
+            accumulatedToolCalls.length > 0 &&
+            _accumulatedText.length === 0 &&
+            textBuffer.length === 0 &&
+            accumulatedReasoningContent.length === 0 &&
+            accumulatedThinkingContent.length === 0;
+        if (hasToolsButNoText) {
+            logger.log(() => `[OpenAIProvider] Model returned tool calls but no text (finish_reason=stop). Requesting continuation for model '${model}'.`, {
+                model,
+                toolCallCount: accumulatedToolCalls.length,
+                baseURL: baseURL ?? this.getBaseURL(),
+            });
+            // Request continuation after tool calls (delegated to shared method)
+            const toolCallsForContinuation = accumulatedToolCalls.map((tc) => ({
+                id: tc.id,
+                type: tc.type,
+                function: tc.function,
+            }));
+            yield* this.requestContinuationAfterToolCalls(toolCallsForContinuation, messagesWithSystem, requestBody, client, abortSignal, model, logger, customHeaders);
+        }
         // Detect and warn about empty streaming responses (common with Kimi K2 after tool calls)
         // Only warn if we truly got nothing - not even reasoning content
         if (_accumulatedText.length === 0 &&
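The continuation trigger added in this hunk reduces to a single predicate over the stream accumulators. A standalone sketch of that check, assuming the accumulators are the plain strings and arrays the surrounding code suggests (the interface and function names are illustrative, not the package's API):

// Models such as gpt-oss-120b on OpenRouter can end with finish_reason=stop,
// tool calls present, and no visible text; that combination is what triggers
// the continuation request instead of returning an empty turn.
interface StreamTotals {
  lastFinishReason: string | null;
  toolCallCount: number;
  textLength: number;       // _accumulatedText.length + textBuffer.length
  reasoningLength: number;  // accumulatedReasoningContent.length
  thinkingLength: number;   // accumulatedThinkingContent.length
}

function hasToolsButNoText(t: StreamTotals): boolean {
  return (
    t.lastFinishReason === 'stop' &&
    t.toolCallCount > 0 &&
    t.textLength === 0 &&
    t.reasoningLength === 0 &&
    t.thinkingLength === 0
  );
}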
@@ -2723,6 +2748,10 @@ export class OpenAIProvider extends BaseProvider {
         let accumulatedReasoningContent = '';
         // Track token usage from streaming chunks
         let streamingUsage = null;
+        // Track finish_reason for detecting empty responses (issue #584)
+        let lastFinishReason = null;
+        // Store pipeline result to avoid duplicate process() calls (CodeRabbit review #764)
+        let cachedPipelineResult = null;
         const allChunks = []; // Collect all chunks first
         try {
             // Handle streaming response - collect all chunks
@@ -2805,6 +2834,7 @@ export class OpenAIProvider extends BaseProvider {
                 }
                 // Check for finish_reason to detect proper stream ending
                 if (choice.finish_reason) {
+                    lastFinishReason = choice.finish_reason;
                     logger.debug(() => `[Streaming] Stream finished with reason: ${choice.finish_reason}`, {
                         model,
                         finishReason: choice.finish_reason,
@@ -3175,12 +3205,12 @@ export class OpenAIProvider extends BaseProvider {
             }
         }
         // Process and emit tool calls using the pipeline
-
-        if (
-
+        cachedPipelineResult = await this.toolCallPipeline.process(abortSignal);
+        if (cachedPipelineResult.normalized.length > 0 ||
+            cachedPipelineResult.failed.length > 0) {
             const blocks = [];
             // Process successful tool calls
-            for (const normalizedCall of
+            for (const normalizedCall of cachedPipelineResult.normalized) {
                 const sanitizedArgs = this.sanitizeToolArgumentsString(normalizedCall.originalArgs ?? normalizedCall.args);
                 // Process tool parameters with double-escape handling
                 const processedParameters = processToolParameters(sanitizedArgs, normalizedCall.name);
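The pipeline result is now assigned once and reused because, per the comment later in this diff, process() resets its collector and a second call would come back empty. A generic sketch of that consume-once pattern, with the result type inferred from this diff rather than taken from the package's definitions:

// Sketch: first caller drains the pipeline; later readers reuse the cached value
// instead of calling process() again and getting an empty result.
interface ToolCallPipelineResult {
  normalized: Array<{ name: string; args: unknown; originalArgs?: string }>;
  failed: Array<{ index: number; validationErrors: string[] }>;
}

let cachedPipelineResult: ToolCallPipelineResult | null = null;

async function processOnce(
  pipeline: { process(signal?: AbortSignal): Promise<ToolCallPipelineResult> },
  signal?: AbortSignal,
): Promise<ToolCallPipelineResult> {
  cachedPipelineResult ??= await pipeline.process(signal);
  return cachedPipelineResult;
}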
@@ -3192,7 +3222,7 @@ export class OpenAIProvider extends BaseProvider {
                 });
             }
             // Handle failed tool calls (could emit as errors or warnings)
-            for (const failed of
+            for (const failed of cachedPipelineResult.failed) {
                 this.getLogger().warn(`Tool call validation failed for index ${failed.index}: ${failed.validationErrors.join(', ')}`);
             }
             if (blocks.length > 0) {
@@ -3240,11 +3270,44 @@ export class OpenAIProvider extends BaseProvider {
                 },
             };
         }
+        // Detect and handle empty streaming responses after tool calls (issue #584)
+        // Some models (like gpt-oss-120b on OpenRouter) return finish_reason=stop with tools but no text
+        // Use cachedPipelineResult instead of pipelineStats.collector.totalCalls since process() resets the collector (CodeRabbit review #764)
+        const toolCallCount = (cachedPipelineResult?.normalized.length ?? 0) +
+            (cachedPipelineResult?.failed.length ?? 0);
+        const hasToolsButNoText = lastFinishReason === 'stop' &&
+            toolCallCount > 0 &&
+            _accumulatedText.length === 0 &&
+            textBuffer.length === 0 &&
+            accumulatedReasoningContent.length === 0 &&
+            accumulatedThinkingContent.length === 0;
+        if (hasToolsButNoText) {
+            logger.log(() => `[OpenAIProvider] Model returned tool calls but no text (finish_reason=stop). Requesting continuation for model '${model}'.`, {
+                model,
+                toolCallCount,
+                baseURL: baseURL ?? this.getBaseURL(),
+            });
+            // Note: In pipeline mode, tool calls have already been processed.
+            // We need to get the normalized tool calls from the cached pipeline result to build continuation messages.
+            // Use cached result to avoid duplicate process() call that would return empty results (CodeRabbit review #764)
+            if (!cachedPipelineResult) {
+                throw new Error('Pipeline result not cached - this should not happen in pipeline mode');
+            }
+            const toolCallsForHistory = cachedPipelineResult.normalized.map((normalizedCall, index) => ({
+                id: `call_${index}`,
+                type: 'function',
+                function: {
+                    name: normalizedCall.name,
+                    arguments: JSON.stringify(normalizedCall.args),
+                },
+            }));
+            // Request continuation after tool calls (delegated to shared method)
+            yield* this.requestContinuationAfterToolCalls(toolCallsForHistory, messagesWithSystem, requestBody, client, abortSignal, model, logger, customHeaders);
+        }
         // Detect and warn about empty streaming responses (common with Kimi K2 after tool calls)
         // Only warn if we truly got nothing - not even reasoning content
-        const pipelineStats = this.toolCallPipeline.getStats();
         if (_accumulatedText.length === 0 &&
-
+            toolCallCount === 0 &&
             textBuffer.length === 0 &&
             accumulatedReasoningContent.length === 0 &&
             accumulatedThinkingContent.length === 0) {
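In the pipeline path the original tool-call ids are no longer available, so the continuation history is rebuilt from the normalized results with synthetic ids. A sketch of that mapping, with the NormalizedToolCall shape inferred from this hunk rather than the package's type definitions:

// Sketch: turn normalized pipeline results back into OpenAI-style tool_call
// records so they can be replayed in the continuation conversation.
interface NormalizedToolCall {
  name: string;
  args: unknown;
}

function toToolCallsForHistory(normalized: NormalizedToolCall[]) {
  return normalized.map((call, index) => ({
    id: `call_${index}`,          // synthetic id; the originals are not retained here
    type: 'function' as const,
    function: {
      name: call.name,
      arguments: JSON.stringify(call.args),
    },
  }));
}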
@@ -3268,7 +3331,7 @@ export class OpenAIProvider extends BaseProvider {
             // Log what we DID get for debugging
             logger.debug(() => `[Streaming pipeline] Stream completed with accumulated content`, {
                 textLength: _accumulatedText.length,
-                toolCallCount
+                toolCallCount,
                 textBufferLength: textBuffer.length,
                 reasoningLength: accumulatedReasoningContent.length,
                 thinkingLength: accumulatedThinkingContent.length,
@@ -3612,5 +3675,83 @@ export class OpenAIProvider extends BaseProvider {
         };
         return { thinking: thinkingBlock, toolCalls };
     }
+    /**
+     * Request continuation after tool calls when model returned no text.
+     * This is a helper to avoid code duplication between legacy and pipeline paths.
+     *
+     * @plan PLAN-20250120-DEBUGLOGGING.P15
+     * @issue #584, #764 (CodeRabbit review)
+     */
+    async *requestContinuationAfterToolCalls(toolCalls, messagesWithSystem, requestBody, client, abortSignal, model, logger, customHeaders) {
+        // Build continuation messages
+        const continuationMessages = [
+            ...messagesWithSystem,
+            // Add the assistant's tool calls
+            {
+                role: 'assistant',
+                tool_calls: toolCalls,
+            },
+            // Add placeholder tool responses (tools have NOT been executed yet - only acknowledged)
+            ...toolCalls.map((tc) => ({
+                role: 'tool',
+                tool_call_id: tc.id,
+                content: '[Tool call acknowledged - awaiting execution]',
+            })),
+            // Add continuation prompt
+            {
+                role: 'user',
+                content: 'The tool calls above have been registered. Please continue with your response.',
+            },
+        ];
+        // Make a continuation request (wrap in try-catch since tools were already yielded)
+        try {
+            const continuationResponse = await client.chat.completions.create({
+                ...requestBody,
+                messages: continuationMessages,
+                stream: true, // Always stream for consistency
+            }, {
+                ...(abortSignal ? { signal: abortSignal } : {}),
+                ...(customHeaders ? { headers: customHeaders } : {}),
+            });
+            let accumulatedText = '';
+            // Process the continuation response
+            for await (const chunk of continuationResponse) {
+                if (abortSignal?.aborted) {
+                    break;
+                }
+                const choice = chunk.choices?.[0];
+                if (!choice)
+                    continue;
+                const deltaContent = this.coerceMessageContentToString(choice.delta?.content);
+                if (deltaContent) {
+                    const sanitized = this.sanitizeProviderText(deltaContent);
+                    if (sanitized) {
+                        accumulatedText += sanitized;
+                        yield {
+                            speaker: 'ai',
+                            blocks: [
+                                {
+                                    type: 'text',
+                                    text: sanitized,
+                                },
+                            ],
+                        };
+                    }
+                }
+            }
+            logger.debug(() => `[OpenAIProvider] Continuation request completed, received ${accumulatedText.length} chars`, {
+                model,
+                accumulatedTextLength: accumulatedText.length,
+            });
+        }
+        catch (continuationError) {
+            // Tool calls were already successfully yielded, so log warning and continue
+            logger.warn(() => `[OpenAIProvider] Continuation request failed, but tool calls were already emitted: ${continuationError instanceof Error ? continuationError.message : String(continuationError)}`, {
+                model,
+                error: continuationError,
+            });
+            // Don't re-throw - tool calls were already successful
+        }
+    }
 }
 //# sourceMappingURL=OpenAIProvider.js.map
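For reference, the conversation the new requestContinuationAfterToolCalls helper sends back to the API has this rough shape: the prior history, the assistant's tool calls, placeholder tool results (the tools have not run yet), and a user nudge to continue. A simplified sketch; the message typing is reduced, and the real code also forwards requestBody, custom headers, and the abort signal as shown in the hunk above:

// Sketch of the continuation message layout used before re-calling
// client.chat.completions.create with stream: true.
type ToolCall = { id: string; type: string; function: { name: string; arguments: string } };

type ChatMessage =
  | { role: 'system' | 'user' | 'assistant'; content: string }
  | { role: 'assistant'; tool_calls: ToolCall[] }
  | { role: 'tool'; tool_call_id: string; content: string };

function buildContinuationMessages(history: ChatMessage[], toolCalls: ToolCall[]): ChatMessage[] {
  return [
    ...history,
    // Echo the assistant's tool calls so the model sees them in context.
    { role: 'assistant', tool_calls: toolCalls },
    // Placeholder tool results: the tools have only been acknowledged, not executed.
    ...toolCalls.map((tc) => ({
      role: 'tool' as const,
      tool_call_id: tc.id,
      content: '[Tool call acknowledged - awaiting execution]',
    })),
    // Nudge the model to produce the text it omitted.
    { role: 'user', content: 'The tool calls above have been registered. Please continue with your response.' },
  ];
}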