@vybestack/llxprt-code-core 0.1.23-nightly.250905.97906524 → 0.2.2-nightly.250908.fb8099b7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/src/adapters/IStreamAdapter.d.ts +3 -3
- package/dist/src/auth/precedence.d.ts +1 -1
- package/dist/src/auth/precedence.js +9 -4
- package/dist/src/auth/precedence.js.map +1 -1
- package/dist/src/auth/types.d.ts +4 -4
- package/dist/src/code_assist/codeAssist.js +8 -6
- package/dist/src/code_assist/codeAssist.js.map +1 -1
- package/dist/src/code_assist/setup.js +9 -7
- package/dist/src/code_assist/setup.js.map +1 -1
- package/dist/src/config/index.d.ts +7 -0
- package/dist/src/config/index.js +8 -0
- package/dist/src/config/index.js.map +1 -0
- package/dist/src/core/client.d.ts +9 -21
- package/dist/src/core/client.js +55 -156
- package/dist/src/core/client.js.map +1 -1
- package/dist/src/core/compression-config.d.ts +1 -1
- package/dist/src/core/compression-config.js +4 -5
- package/dist/src/core/compression-config.js.map +1 -1
- package/dist/src/core/coreToolScheduler.js +50 -15
- package/dist/src/core/coreToolScheduler.js.map +1 -1
- package/dist/src/core/geminiChat.d.ts +51 -2
- package/dist/src/core/geminiChat.js +616 -106
- package/dist/src/core/geminiChat.js.map +1 -1
- package/dist/src/core/nonInteractiveToolExecutor.js +70 -19
- package/dist/src/core/nonInteractiveToolExecutor.js.map +1 -1
- package/dist/src/core/prompts.js +34 -26
- package/dist/src/core/prompts.js.map +1 -1
- package/dist/src/core/turn.d.ts +1 -0
- package/dist/src/core/turn.js +8 -6
- package/dist/src/core/turn.js.map +1 -1
- package/dist/src/index.d.ts +1 -2
- package/dist/src/index.js +2 -2
- package/dist/src/index.js.map +1 -1
- package/dist/src/prompt-config/TemplateEngine.js +17 -0
- package/dist/src/prompt-config/TemplateEngine.js.map +1 -1
- package/dist/src/prompt-config/defaults/core-defaults.js +39 -32
- package/dist/src/prompt-config/defaults/core-defaults.js.map +1 -1
- package/dist/src/prompt-config/defaults/core.md +2 -0
- package/dist/src/prompt-config/defaults/provider-defaults.js +34 -27
- package/dist/src/prompt-config/defaults/provider-defaults.js.map +1 -1
- package/dist/src/prompt-config/defaults/providers/gemini/core.md +229 -43
- package/dist/src/prompt-config/defaults/providers/gemini/models/gemini-2.5-flash/core.md +12 -0
- package/dist/src/prompt-config/defaults/providers/gemini/models/gemini-2.5-flash/gemini-2-5-flash/core.md +12 -0
- package/dist/src/prompt-config/types.d.ts +2 -0
- package/dist/src/providers/BaseProvider.d.ts +32 -6
- package/dist/src/providers/BaseProvider.js +79 -22
- package/dist/src/providers/BaseProvider.js.map +1 -1
- package/dist/src/providers/IProvider.d.ts +9 -3
- package/dist/src/providers/LoggingProviderWrapper.d.ts +10 -3
- package/dist/src/providers/LoggingProviderWrapper.js +33 -27
- package/dist/src/providers/LoggingProviderWrapper.js.map +1 -1
- package/dist/src/providers/ProviderContentGenerator.d.ts +2 -2
- package/dist/src/providers/ProviderContentGenerator.js +9 -6
- package/dist/src/providers/ProviderContentGenerator.js.map +1 -1
- package/dist/src/providers/anthropic/AnthropicProvider.d.ts +27 -21
- package/dist/src/providers/anthropic/AnthropicProvider.js +473 -472
- package/dist/src/providers/anthropic/AnthropicProvider.js.map +1 -1
- package/dist/src/providers/gemini/GeminiProvider.d.ts +14 -9
- package/dist/src/providers/gemini/GeminiProvider.js +202 -486
- package/dist/src/providers/gemini/GeminiProvider.js.map +1 -1
- package/dist/src/providers/openai/ConversationCache.d.ts +3 -3
- package/dist/src/providers/openai/IChatGenerateParams.d.ts +9 -4
- package/dist/src/providers/openai/OpenAIProvider.d.ts +44 -115
- package/dist/src/providers/openai/OpenAIProvider.js +535 -948
- package/dist/src/providers/openai/OpenAIProvider.js.map +1 -1
- package/dist/src/providers/openai/buildResponsesRequest.d.ts +3 -3
- package/dist/src/providers/openai/buildResponsesRequest.js +67 -37
- package/dist/src/providers/openai/buildResponsesRequest.js.map +1 -1
- package/dist/src/providers/openai/estimateRemoteTokens.d.ts +2 -2
- package/dist/src/providers/openai/estimateRemoteTokens.js +21 -8
- package/dist/src/providers/openai/estimateRemoteTokens.js.map +1 -1
- package/dist/src/providers/openai/parseResponsesStream.d.ts +6 -2
- package/dist/src/providers/openai/parseResponsesStream.js +99 -391
- package/dist/src/providers/openai/parseResponsesStream.js.map +1 -1
- package/dist/src/providers/openai/syntheticToolResponses.d.ts +5 -5
- package/dist/src/providers/openai/syntheticToolResponses.js +102 -91
- package/dist/src/providers/openai/syntheticToolResponses.js.map +1 -1
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.d.ts +18 -20
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.js +250 -239
- package/dist/src/providers/openai-responses/OpenAIResponsesProvider.js.map +1 -1
- package/dist/src/providers/tokenizers/OpenAITokenizer.js +3 -3
- package/dist/src/providers/tokenizers/OpenAITokenizer.js.map +1 -1
- package/dist/src/providers/types.d.ts +1 -1
- package/dist/src/services/history/ContentConverters.d.ts +6 -1
- package/dist/src/services/history/ContentConverters.js +155 -18
- package/dist/src/services/history/ContentConverters.js.map +1 -1
- package/dist/src/services/history/HistoryService.d.ts +52 -0
- package/dist/src/services/history/HistoryService.js +245 -93
- package/dist/src/services/history/HistoryService.js.map +1 -1
- package/dist/src/services/history/IContent.d.ts +4 -0
- package/dist/src/services/history/IContent.js.map +1 -1
- package/dist/src/telemetry/types.d.ts +16 -4
- package/dist/src/telemetry/types.js.map +1 -1
- package/dist/src/tools/IToolFormatter.d.ts +2 -2
- package/dist/src/tools/ToolFormatter.d.ts +42 -4
- package/dist/src/tools/ToolFormatter.js +159 -37
- package/dist/src/tools/ToolFormatter.js.map +1 -1
- package/dist/src/tools/doubleEscapeUtils.d.ts +57 -0
- package/dist/src/tools/doubleEscapeUtils.js +241 -0
- package/dist/src/tools/doubleEscapeUtils.js.map +1 -0
- package/dist/src/tools/read-file.js +5 -2
- package/dist/src/tools/read-file.js.map +1 -1
- package/dist/src/tools/todo-schemas.d.ts +4 -4
- package/dist/src/tools/write-file.js +5 -2
- package/dist/src/tools/write-file.js.map +1 -1
- package/dist/src/types/modelParams.d.ts +8 -0
- package/dist/src/utils/bfsFileSearch.js +2 -6
- package/dist/src/utils/bfsFileSearch.js.map +1 -1
- package/package.json +8 -7
- package/dist/src/core/ContentGeneratorAdapter.d.ts +0 -37
- package/dist/src/core/ContentGeneratorAdapter.js +0 -58
- package/dist/src/core/ContentGeneratorAdapter.js.map +0 -1
- package/dist/src/providers/IMessage.d.ts +0 -38
- package/dist/src/providers/IMessage.js +0 -17
- package/dist/src/providers/IMessage.js.map +0 -1
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.d.ts +0 -69
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.js +0 -577
- package/dist/src/providers/adapters/GeminiCompatibleWrapper.js.map +0 -1
@@ -1,30 +1,16 @@
|
|
1
1
|
import Anthropic from '@anthropic-ai/sdk';
|
2
2
|
import { DebugLogger } from '../../debug/index.js';
|
3
|
-
import { retryWithBackoff } from '../../utils/retry.js';
|
4
3
|
import { ToolFormatter } from '../../tools/ToolFormatter.js';
|
5
4
|
import { BaseProvider } from '../BaseProvider.js';
|
6
5
|
import { getSettingsService } from '../../settings/settingsServiceInstance.js';
|
7
|
-
import {
|
6
|
+
import { processToolParameters, logDoubleEscapingInChunk, } from '../../tools/doubleEscapeUtils.js';
|
7
|
+
import { getCoreSystemPromptAsync } from '../../core/prompts.js';
|
8
8
|
export class AnthropicProvider extends BaseProvider {
|
9
9
|
logger;
|
10
10
|
anthropic;
|
11
11
|
toolFormatter;
|
12
12
|
toolFormat = 'anthropic';
|
13
|
-
baseURL;
|
14
|
-
_config;
|
15
|
-
currentModel = 'claude-sonnet-4-20250514'; // Default model
|
16
|
-
modelParams;
|
17
13
|
_cachedAuthKey; // Track cached auth key for client recreation
|
18
|
-
// Model cache for latest resolution
|
19
|
-
modelCache = null;
|
20
|
-
modelCacheTTL = 5 * 60 * 1000; // 5 minutes
|
21
|
-
// Retry configuration
|
22
|
-
retryableErrorMessages = [
|
23
|
-
'overloaded',
|
24
|
-
'rate_limit',
|
25
|
-
'server_error',
|
26
|
-
'service_unavailable',
|
27
|
-
];
|
28
14
|
// Model patterns for max output tokens
|
29
15
|
modelTokenPatterns = [
|
30
16
|
{ pattern: /claude-.*opus-4/i, tokens: 32000 },
|
@@ -47,15 +33,11 @@ export class AnthropicProvider extends BaseProvider {
|
|
47
33
|
oauthProvider: oauthManager ? 'anthropic' : undefined,
|
48
34
|
oauthManager,
|
49
35
|
};
|
50
|
-
super(baseConfig);
|
36
|
+
super(baseConfig, config);
|
51
37
|
this.logger = new DebugLogger('llxprt:anthropic:provider');
|
52
|
-
this.baseURL = baseURL;
|
53
|
-
this._config = config;
|
54
|
-
// Config reserved for future provider customization
|
55
|
-
void this._config;
|
56
38
|
this.anthropic = new Anthropic({
|
57
39
|
apiKey: apiKey || '', // Empty string if OAuth will be used
|
58
|
-
baseURL,
|
40
|
+
baseURL: config?.baseUrl || baseURL,
|
59
41
|
dangerouslyAllowBrowser: true,
|
60
42
|
});
|
61
43
|
this.toolFormatter = new ToolFormatter();
|
@@ -82,12 +64,14 @@ export class AnthropicProvider extends BaseProvider {
|
|
82
64
|
if (this._cachedAuthKey !== resolvedToken) {
|
83
65
|
// Check if this is an OAuth token (starts with sk-ant-oat)
|
84
66
|
const isOAuthToken = resolvedToken.startsWith('sk-ant-oat');
|
67
|
+
// Use the unified getBaseURL() method from BaseProvider
|
68
|
+
const baseURL = this.getBaseURL();
|
85
69
|
if (isOAuthToken) {
|
86
70
|
// For OAuth tokens, use authToken field which sends Bearer token
|
87
71
|
// Don't pass apiKey at all - just authToken
|
88
72
|
const oauthConfig = {
|
89
73
|
authToken: resolvedToken, // Use authToken for OAuth Bearer tokens
|
90
|
-
baseURL
|
74
|
+
baseURL,
|
91
75
|
dangerouslyAllowBrowser: true,
|
92
76
|
defaultHeaders: {
|
93
77
|
'anthropic-beta': 'oauth-2025-04-20', // Still need the beta header
|
@@ -99,7 +83,7 @@ export class AnthropicProvider extends BaseProvider {
|
|
99
83
|
// Regular API key auth
|
100
84
|
this.anthropic = new Anthropic({
|
101
85
|
apiKey: resolvedToken,
|
102
|
-
baseURL
|
86
|
+
baseURL,
|
103
87
|
dangerouslyAllowBrowser: true,
|
104
88
|
});
|
105
89
|
}
|
@@ -177,331 +161,26 @@ export class AnthropicProvider extends BaseProvider {
|
|
177
161
|
return []; // Return empty array on error
|
178
162
|
}
|
179
163
|
}
|
180
|
-
async *generateChatCompletion(messages, tools, _toolFormat) {
|
181
|
-
const authToken = await this.getAuthToken();
|
182
|
-
if (!authToken) {
|
183
|
-
throw new Error('Authentication required to generate Anthropic chat completions');
|
184
|
-
}
|
185
|
-
// Get streaming setting from ephemeral settings (default: enabled)
|
186
|
-
const streamingSetting = this._config?.getEphemeralSettings?.()?.['streaming'];
|
187
|
-
const streamingEnabled = streamingSetting !== 'disabled';
|
188
|
-
// Update Anthropic client with resolved authentication if needed
|
189
|
-
await this.updateClientWithResolvedAuth();
|
190
|
-
const apiCall = async () => {
|
191
|
-
// Resolve model if it uses -latest alias
|
192
|
-
const resolvedModel = await this.resolveLatestModel(this.currentModel);
|
193
|
-
// Always validate and fix message history to prevent tool_use/tool_result mismatches
|
194
|
-
// This is necessary for both cancelled tools and retries
|
195
|
-
const validatedMessages = this.validateAndFixMessages(messages);
|
196
|
-
// Use the resolved model for the API call
|
197
|
-
const modelForApi = resolvedModel;
|
198
|
-
// Check if we're in OAuth mode early
|
199
|
-
const authToken = await this.getAuthToken();
|
200
|
-
const isOAuth = authToken && authToken.startsWith('sk-ant-oat');
|
201
|
-
// Extract system message if present and handle tool responses
|
202
|
-
let systemMessage;
|
203
|
-
let llxprtPrompts; // Store llxprt prompts separately
|
204
|
-
const anthropicMessages = [];
|
205
|
-
for (const msg of validatedMessages) {
|
206
|
-
if (msg.role === 'system') {
|
207
|
-
if (isOAuth) {
|
208
|
-
// In OAuth mode, save system content for injection as user message
|
209
|
-
llxprtPrompts = msg.content;
|
210
|
-
}
|
211
|
-
else {
|
212
|
-
// In normal mode, use as system message
|
213
|
-
systemMessage = msg.content;
|
214
|
-
}
|
215
|
-
}
|
216
|
-
else if (msg.role === 'tool') {
|
217
|
-
// Anthropic expects tool responses as user messages with tool_result content
|
218
|
-
anthropicMessages.push({
|
219
|
-
role: 'user',
|
220
|
-
content: [
|
221
|
-
{
|
222
|
-
type: 'tool_result',
|
223
|
-
tool_use_id: msg.tool_call_id || 'unknown',
|
224
|
-
content: msg.content,
|
225
|
-
},
|
226
|
-
],
|
227
|
-
});
|
228
|
-
}
|
229
|
-
else if (msg.role === 'assistant' && msg.tool_calls) {
|
230
|
-
// Handle assistant messages with tool calls
|
231
|
-
const content = [];
|
232
|
-
if (msg.content) {
|
233
|
-
content.push({ type: 'text', text: msg.content });
|
234
|
-
}
|
235
|
-
for (const toolCall of msg.tool_calls) {
|
236
|
-
content.push({
|
237
|
-
type: 'tool_use',
|
238
|
-
id: toolCall.id,
|
239
|
-
name: toolCall.function.name,
|
240
|
-
input: toolCall.function.arguments
|
241
|
-
? JSON.parse(toolCall.function.arguments)
|
242
|
-
: {},
|
243
|
-
});
|
244
|
-
}
|
245
|
-
anthropicMessages.push({
|
246
|
-
role: 'assistant',
|
247
|
-
content,
|
248
|
-
});
|
249
|
-
}
|
250
|
-
else {
|
251
|
-
// Regular user/assistant messages
|
252
|
-
anthropicMessages.push({
|
253
|
-
role: msg.role,
|
254
|
-
content: msg.content,
|
255
|
-
});
|
256
|
-
}
|
257
|
-
}
|
258
|
-
// In OAuth mode, inject llxprt prompts as conversation content
|
259
|
-
// ONLY for the very first message in a new conversation
|
260
|
-
if (isOAuth && llxprtPrompts && anthropicMessages.length === 0) {
|
261
|
-
// This is the very first message - inject the context
|
262
|
-
const contextMessage = `Important context for using llxprt tools:
|
263
|
-
|
264
|
-
Tool Parameter Reference:
|
265
|
-
- read_file uses parameter 'absolute_path' (not 'file_path')
|
266
|
-
- write_file uses parameter 'file_path' (not 'path')
|
267
|
-
- list_directory uses parameter 'path'
|
268
|
-
- replace uses 'file_path', 'old_string', 'new_string'
|
269
|
-
- search_file_content (grep) expects regex patterns, not literal text
|
270
|
-
- todo_write requires 'todos' array with {id, content, status, priority}
|
271
|
-
- All file paths must be absolute (starting with /)
|
272
|
-
|
273
|
-
${llxprtPrompts}`;
|
274
|
-
// Inject at the beginning of the conversation
|
275
|
-
anthropicMessages.unshift({
|
276
|
-
role: 'user',
|
277
|
-
content: contextMessage,
|
278
|
-
}, {
|
279
|
-
role: 'assistant',
|
280
|
-
content: "I understand the llxprt tool parameters and context. I'll use the correct parameter names for each tool. Ready to help with your tasks.",
|
281
|
-
});
|
282
|
-
}
|
283
|
-
// For ongoing conversations, the context was already injected in the first message
|
284
|
-
// so we don't need to inject it again
|
285
|
-
// Convert ITool[] to Anthropic's tool format if tools are provided
|
286
|
-
const anthropicTools = tools
|
287
|
-
? this.toolFormatter.toProviderFormat(tools, 'anthropic')
|
288
|
-
: undefined;
|
289
|
-
// Create the request options with proper typing
|
290
|
-
const createOptions = {
|
291
|
-
model: modelForApi,
|
292
|
-
messages: anthropicMessages,
|
293
|
-
max_tokens: this.getMaxTokensForModel(resolvedModel),
|
294
|
-
...this.modelParams, // Apply model params first
|
295
|
-
stream: streamingEnabled, // Use ephemeral streaming setting
|
296
|
-
};
|
297
|
-
// Set system message based on auth mode
|
298
|
-
if (isOAuth) {
|
299
|
-
// OAuth mode: Use Claude Code system prompt (required for Max/Pro)
|
300
|
-
createOptions.system =
|
301
|
-
"You are Claude Code, Anthropic's official CLI for Claude.";
|
302
|
-
// llxprt prompts were already injected as conversation content above
|
303
|
-
}
|
304
|
-
else if (systemMessage) {
|
305
|
-
// Normal mode: Use full llxprt system prompt
|
306
|
-
createOptions.system = systemMessage;
|
307
|
-
}
|
308
|
-
if (anthropicTools) {
|
309
|
-
createOptions.tools = anthropicTools;
|
310
|
-
}
|
311
|
-
if (streamingEnabled) {
|
312
|
-
return this.anthropic.messages.create(createOptions);
|
313
|
-
}
|
314
|
-
else {
|
315
|
-
return this.anthropic.messages.create(createOptions);
|
316
|
-
}
|
317
|
-
};
|
318
|
-
try {
|
319
|
-
const response = await retryWithBackoff(apiCall, {
|
320
|
-
shouldRetry: (error) => this.isRetryableError(error),
|
321
|
-
});
|
322
|
-
if (streamingEnabled) {
|
323
|
-
// Handle streaming response
|
324
|
-
const stream = response;
|
325
|
-
let currentUsage;
|
326
|
-
// Track current tool call being streamed
|
327
|
-
let currentToolCall;
|
328
|
-
// Process the stream
|
329
|
-
for await (const chunk of stream) {
|
330
|
-
this.logger.debug(() => `Received chunk type: ${chunk.type}${chunk.type === 'message_start'
|
331
|
-
? ` - ${JSON.stringify(chunk, null, 2)}`
|
332
|
-
: ''}`);
|
333
|
-
if (chunk.type === 'message_start') {
|
334
|
-
// Initial usage info
|
335
|
-
this.logger.debug(() => `message_start chunk: ${JSON.stringify(chunk, null, 2)}`);
|
336
|
-
if (chunk.message?.usage) {
|
337
|
-
const usage = chunk.message.usage;
|
338
|
-
// Don't require both fields - Anthropic might send them separately
|
339
|
-
currentUsage = {
|
340
|
-
input_tokens: usage.input_tokens ?? 0,
|
341
|
-
output_tokens: usage.output_tokens ?? 0,
|
342
|
-
};
|
343
|
-
this.logger.debug(() => `Set currentUsage from message_start: ${JSON.stringify(currentUsage)}`);
|
344
|
-
yield {
|
345
|
-
role: 'assistant',
|
346
|
-
content: '',
|
347
|
-
usage: {
|
348
|
-
prompt_tokens: currentUsage.input_tokens,
|
349
|
-
completion_tokens: currentUsage.output_tokens,
|
350
|
-
total_tokens: currentUsage.input_tokens + currentUsage.output_tokens,
|
351
|
-
},
|
352
|
-
};
|
353
|
-
}
|
354
|
-
}
|
355
|
-
else if (chunk.type === 'content_block_start') {
|
356
|
-
// Handle tool use blocks
|
357
|
-
if (chunk.content_block.type === 'tool_use') {
|
358
|
-
currentToolCall = {
|
359
|
-
id: chunk.content_block.id,
|
360
|
-
name: chunk.content_block.name,
|
361
|
-
input: '',
|
362
|
-
};
|
363
|
-
}
|
364
|
-
}
|
365
|
-
else if (chunk.type === 'content_block_delta') {
|
366
|
-
// Yield content chunks
|
367
|
-
if (chunk.delta.type === 'text_delta') {
|
368
|
-
yield {
|
369
|
-
role: 'assistant',
|
370
|
-
content: chunk.delta.text,
|
371
|
-
};
|
372
|
-
}
|
373
|
-
else if (chunk.delta.type === 'input_json_delta' &&
|
374
|
-
currentToolCall) {
|
375
|
-
// Handle input deltas for tool calls
|
376
|
-
currentToolCall.input += chunk.delta.partial_json;
|
377
|
-
}
|
378
|
-
}
|
379
|
-
else if (chunk.type === 'content_block_stop') {
|
380
|
-
// Complete the tool call
|
381
|
-
if (currentToolCall) {
|
382
|
-
const toolCallResult = this.toolFormatter.fromProviderFormat({
|
383
|
-
id: currentToolCall.id,
|
384
|
-
type: 'tool_use',
|
385
|
-
name: currentToolCall.name,
|
386
|
-
input: currentToolCall.input
|
387
|
-
? JSON.parse(currentToolCall.input)
|
388
|
-
: undefined,
|
389
|
-
}, 'anthropic');
|
390
|
-
yield {
|
391
|
-
role: 'assistant',
|
392
|
-
content: '',
|
393
|
-
tool_calls: toolCallResult,
|
394
|
-
};
|
395
|
-
currentToolCall = undefined;
|
396
|
-
}
|
397
|
-
}
|
398
|
-
else if (chunk.type === 'message_delta') {
|
399
|
-
// Update usage if provided
|
400
|
-
if (chunk.usage) {
|
401
|
-
this.logger.debug(() => `message_delta usage: ${JSON.stringify(chunk.usage, null, 2)}`);
|
402
|
-
}
|
403
|
-
if (chunk.usage) {
|
404
|
-
// Anthropic may send partial usage data - merge with existing
|
405
|
-
currentUsage = {
|
406
|
-
input_tokens: chunk.usage.input_tokens ?? currentUsage?.input_tokens ?? 0,
|
407
|
-
output_tokens: chunk.usage.output_tokens ?? currentUsage?.output_tokens ?? 0,
|
408
|
-
};
|
409
|
-
this.logger.debug(() => `Updated currentUsage from message_delta: ${JSON.stringify(currentUsage)}`);
|
410
|
-
yield {
|
411
|
-
role: 'assistant',
|
412
|
-
content: '',
|
413
|
-
usage: {
|
414
|
-
prompt_tokens: currentUsage.input_tokens,
|
415
|
-
completion_tokens: currentUsage.output_tokens,
|
416
|
-
total_tokens: currentUsage.input_tokens + currentUsage.output_tokens,
|
417
|
-
},
|
418
|
-
};
|
419
|
-
}
|
420
|
-
}
|
421
|
-
else if (chunk.type === 'message_stop') {
|
422
|
-
// Final usage info
|
423
|
-
if (currentUsage) {
|
424
|
-
this.logger.debug(() => `Yielding final usage: ${JSON.stringify(currentUsage)}`);
|
425
|
-
yield {
|
426
|
-
role: 'assistant',
|
427
|
-
content: '',
|
428
|
-
usage: {
|
429
|
-
prompt_tokens: currentUsage.input_tokens,
|
430
|
-
completion_tokens: currentUsage.output_tokens,
|
431
|
-
total_tokens: currentUsage.input_tokens + currentUsage.output_tokens,
|
432
|
-
},
|
433
|
-
};
|
434
|
-
}
|
435
|
-
else {
|
436
|
-
this.logger.debug(() => 'No currentUsage data at message_stop');
|
437
|
-
}
|
438
|
-
}
|
439
|
-
}
|
440
|
-
}
|
441
|
-
else {
|
442
|
-
// Handle non-streaming response
|
443
|
-
const message = response;
|
444
|
-
let fullContent = '';
|
445
|
-
const toolCalls = [];
|
446
|
-
// Process content blocks
|
447
|
-
for (const content of message.content) {
|
448
|
-
if (content.type === 'text') {
|
449
|
-
fullContent += content.text;
|
450
|
-
}
|
451
|
-
else if (content.type === 'tool_use') {
|
452
|
-
toolCalls.push({
|
453
|
-
id: content.id,
|
454
|
-
type: 'function',
|
455
|
-
function: {
|
456
|
-
name: content.name,
|
457
|
-
arguments: JSON.stringify(content.input),
|
458
|
-
},
|
459
|
-
});
|
460
|
-
}
|
461
|
-
}
|
462
|
-
// Build response message
|
463
|
-
const responseMessage = {
|
464
|
-
role: ContentGeneratorRole.ASSISTANT,
|
465
|
-
content: fullContent,
|
466
|
-
};
|
467
|
-
if (toolCalls.length > 0) {
|
468
|
-
responseMessage.tool_calls = toolCalls;
|
469
|
-
}
|
470
|
-
if (message.usage) {
|
471
|
-
responseMessage.usage = {
|
472
|
-
prompt_tokens: message.usage.input_tokens,
|
473
|
-
completion_tokens: message.usage.output_tokens,
|
474
|
-
total_tokens: message.usage.input_tokens + message.usage.output_tokens,
|
475
|
-
};
|
476
|
-
}
|
477
|
-
yield responseMessage;
|
478
|
-
}
|
479
|
-
}
|
480
|
-
catch (error) {
|
481
|
-
const errorMessage = error instanceof Error ? error.message : String(error);
|
482
|
-
throw new Error(`Anthropic API error: ${errorMessage}`);
|
483
|
-
}
|
484
|
-
}
|
485
164
|
setApiKey(apiKey) {
|
486
165
|
// Call base provider implementation
|
487
166
|
super.setApiKey(apiKey);
|
488
167
|
// Create a new Anthropic client with the updated API key
|
168
|
+
const resolvedBaseURL = this.providerConfig?.baseUrl || this.baseProviderConfig.baseURL;
|
489
169
|
this.anthropic = new Anthropic({
|
490
170
|
apiKey,
|
491
|
-
baseURL:
|
171
|
+
baseURL: resolvedBaseURL,
|
492
172
|
dangerouslyAllowBrowser: true,
|
493
173
|
});
|
494
174
|
}
|
495
175
|
setBaseUrl(baseUrl) {
|
496
|
-
//
|
497
|
-
this.baseURL = baseUrl && baseUrl.trim() !== '' ? baseUrl : undefined;
|
498
|
-
// Call base provider implementation
|
176
|
+
// Call base provider implementation which stores in ephemeral settings
|
499
177
|
super.setBaseUrl?.(baseUrl);
|
500
178
|
// Create a new Anthropic client with the updated (or cleared) base URL
|
501
179
|
// Will be updated with actual token in updateClientWithResolvedAuth
|
180
|
+
const resolvedBaseURL = this.getBaseURL();
|
502
181
|
this.anthropic = new Anthropic({
|
503
182
|
apiKey: '', // Empty string, will be replaced when auth is resolved
|
504
|
-
baseURL:
|
183
|
+
baseURL: resolvedBaseURL,
|
505
184
|
dangerouslyAllowBrowser: true,
|
506
185
|
});
|
507
186
|
}
|
@@ -514,8 +193,7 @@ ${llxprtPrompts}`;
|
|
514
193
|
catch (error) {
|
515
194
|
this.logger.debug(() => `Failed to persist model to SettingsService: ${error}`);
|
516
195
|
}
|
517
|
-
//
|
518
|
-
this.currentModel = modelId;
|
196
|
+
// No local caching - always look up from SettingsService
|
519
197
|
}
|
520
198
|
getCurrentModel() {
|
521
199
|
// Try to get from SettingsService first (source of truth)
|
@@ -529,11 +207,11 @@ ${llxprtPrompts}`;
|
|
529
207
|
catch (error) {
|
530
208
|
this.logger.debug(() => `Failed to get model from SettingsService: ${error}`);
|
531
209
|
}
|
532
|
-
//
|
533
|
-
return this.
|
210
|
+
// Always return from getDefaultModel, no caching
|
211
|
+
return this.getDefaultModel();
|
534
212
|
}
|
535
213
|
getDefaultModel() {
|
536
|
-
// Return
|
214
|
+
// Return hardcoded default - do NOT call getModel() to avoid circular dependency
|
537
215
|
return 'claude-sonnet-4-20250514';
|
538
216
|
}
|
539
217
|
/**
|
@@ -555,54 +233,6 @@ ${llxprtPrompts}`;
|
|
555
233
|
return 'claude-sonnet-4-latest';
|
556
234
|
}
|
557
235
|
}
|
558
|
-
/**
|
559
|
-
* Resolves a model ID that may contain "-latest" to the actual model ID.
|
560
|
-
* Caches the result to avoid frequent API calls.
|
561
|
-
*/
|
562
|
-
async resolveLatestModel(modelId) {
|
563
|
-
// If it's not a latest alias, return as-is
|
564
|
-
if (!modelId.endsWith('-latest')) {
|
565
|
-
return modelId;
|
566
|
-
}
|
567
|
-
// Check cache
|
568
|
-
const now = Date.now();
|
569
|
-
if (this.modelCache &&
|
570
|
-
now - this.modelCache.timestamp < this.modelCacheTTL) {
|
571
|
-
// Find the corresponding model from cache
|
572
|
-
const model = this.modelCache.models.find((m) => m.id === modelId);
|
573
|
-
if (model) {
|
574
|
-
// The latest aliases are synthetic, find the real model
|
575
|
-
const tier = modelId.includes('opus') ? 'opus' : 'sonnet';
|
576
|
-
const realModel = this.modelCache.models
|
577
|
-
.filter((m) => m.id.startsWith(`claude-${tier}-4-`) && !m.id.endsWith('-latest'))
|
578
|
-
.sort((a, b) => b.id.localeCompare(a.id))[0];
|
579
|
-
return realModel ? realModel.id : modelId;
|
580
|
-
}
|
581
|
-
}
|
582
|
-
try {
|
583
|
-
// Ensure client has proper auth before calling getModels
|
584
|
-
await this.updateClientWithResolvedAuth();
|
585
|
-
// Fetch fresh models
|
586
|
-
const models = await this.getModels();
|
587
|
-
this.modelCache = { models, timestamp: now };
|
588
|
-
// Find the real model for this latest alias
|
589
|
-
const tier = modelId.includes('opus') ? 'opus' : 'sonnet';
|
590
|
-
const realModel = models
|
591
|
-
.filter((m) => m.id.startsWith(`claude-${tier}-4-`) && !m.id.endsWith('-latest'))
|
592
|
-
.sort((a, b) => b.id.localeCompare(a.id))[0];
|
593
|
-
return realModel ? realModel.id : modelId;
|
594
|
-
}
|
595
|
-
catch (_error) {
|
596
|
-
// If we can't fetch models, just use simple fallback like Claude Code does
|
597
|
-
this.logger.debug(() => 'Failed to fetch models for latest resolution, using fallback');
|
598
|
-
if (modelId.includes('opus')) {
|
599
|
-
return 'opus';
|
600
|
-
}
|
601
|
-
else {
|
602
|
-
return 'sonnet'; // Default to sonnet like Claude Code
|
603
|
-
}
|
604
|
-
}
|
605
|
-
}
|
606
236
|
getMaxTokensForModel(modelId) {
|
607
237
|
// Handle latest aliases explicitly
|
608
238
|
if (modelId === 'claude-opus-4-latest' ||
|
@@ -637,88 +267,6 @@ ${llxprtPrompts}`;
|
|
637
267
|
// Default for Claude 3.x models
|
638
268
|
return 200000;
|
639
269
|
}
|
640
|
-
isRetryableError(error) {
|
641
|
-
if (!(error instanceof Error))
|
642
|
-
return false;
|
643
|
-
const errorMessage = error.message.toLowerCase();
|
644
|
-
if (error.message.includes('rate_limit_error'))
|
645
|
-
return true;
|
646
|
-
// Check for Anthropic-specific error patterns
|
647
|
-
if (error.message.includes('Anthropic API error:')) {
|
648
|
-
// Extract the actual error content
|
649
|
-
const match = error.message.match(/{"type":"error","error":({.*})}/);
|
650
|
-
if (match) {
|
651
|
-
try {
|
652
|
-
const errorData = JSON.parse(match[1]);
|
653
|
-
const errorType = errorData.type?.toLowerCase() || '';
|
654
|
-
const errorMsg = errorData.message?.toLowerCase() || '';
|
655
|
-
return this.retryableErrorMessages.some((retryable) => errorType.includes(retryable) || errorMsg.includes(retryable));
|
656
|
-
}
|
657
|
-
catch {
|
658
|
-
// If parsing fails, fall back to string matching
|
659
|
-
}
|
660
|
-
}
|
661
|
-
}
|
662
|
-
// Direct error message checking
|
663
|
-
return this.retryableErrorMessages.some((msg) => errorMessage.includes(msg));
|
664
|
-
}
|
665
|
-
/**
|
666
|
-
* Validates and potentially fixes the message history to ensure proper tool_use/tool_result pairing.
|
667
|
-
* This prevents the "tool_use ids were found without tool_result blocks" error after a failed request.
|
668
|
-
*/
|
669
|
-
validateAndFixMessages(messages) {
|
670
|
-
const fixedMessages = [];
|
671
|
-
let pendingToolCalls = [];
|
672
|
-
for (let i = 0; i < messages.length; i++) {
|
673
|
-
const msg = messages[i];
|
674
|
-
if (msg.role === 'assistant' && msg.tool_calls) {
|
675
|
-
// Track tool calls from assistant
|
676
|
-
fixedMessages.push(msg);
|
677
|
-
pendingToolCalls = msg.tool_calls.map((tc) => ({
|
678
|
-
id: tc.id,
|
679
|
-
name: tc.function.name,
|
680
|
-
}));
|
681
|
-
}
|
682
|
-
else if (msg.role === 'tool' && pendingToolCalls.length > 0) {
|
683
|
-
// Match tool results with pending tool calls
|
684
|
-
fixedMessages.push(msg);
|
685
|
-
// Remove the matched tool call
|
686
|
-
pendingToolCalls = pendingToolCalls.filter((tc) => tc.id !== msg.tool_call_id);
|
687
|
-
}
|
688
|
-
else if (msg.role === 'assistant' ||
|
689
|
-
msg.role === 'user' ||
|
690
|
-
msg.role === 'system') {
|
691
|
-
// If we have pending tool calls and encounter a non-tool message,
|
692
|
-
// we need to add dummy tool results to maintain consistency
|
693
|
-
if (pendingToolCalls.length > 0 && msg.role !== 'system') {
|
694
|
-
// Add dummy tool results for unmatched tool calls
|
695
|
-
for (const toolCall of pendingToolCalls) {
|
696
|
-
fixedMessages.push({
|
697
|
-
role: 'tool',
|
698
|
-
tool_call_id: toolCall.id,
|
699
|
-
content: 'Error: Tool execution was interrupted. Please retry.',
|
700
|
-
});
|
701
|
-
}
|
702
|
-
pendingToolCalls = [];
|
703
|
-
}
|
704
|
-
fixedMessages.push(msg);
|
705
|
-
}
|
706
|
-
else {
|
707
|
-
fixedMessages.push(msg);
|
708
|
-
}
|
709
|
-
}
|
710
|
-
// Handle any remaining pending tool calls at the end
|
711
|
-
if (pendingToolCalls.length > 0) {
|
712
|
-
for (const toolCall of pendingToolCalls) {
|
713
|
-
fixedMessages.push({
|
714
|
-
role: 'tool',
|
715
|
-
tool_call_id: toolCall.id,
|
716
|
-
content: 'Error: Tool execution was interrupted. Please retry.',
|
717
|
-
});
|
718
|
-
}
|
719
|
-
}
|
720
|
-
return fixedMessages;
|
721
|
-
}
|
722
270
|
/**
|
723
271
|
* Anthropic always requires payment (API key or OAuth)
|
724
272
|
*/
|
@@ -742,11 +290,31 @@ ${llxprtPrompts}`;
|
|
742
290
|
* @param params Parameters to merge with existing, or undefined to clear all
|
743
291
|
*/
|
744
292
|
setModelParams(params) {
|
293
|
+
const settingsService = getSettingsService();
|
745
294
|
if (params === undefined) {
|
746
|
-
|
295
|
+
// Clear all model params
|
296
|
+
settingsService.setProviderSetting(this.name, 'temperature', undefined);
|
297
|
+
settingsService.setProviderSetting(this.name, 'max_tokens', undefined);
|
298
|
+
settingsService.setProviderSetting(this.name, 'top_p', undefined);
|
299
|
+
settingsService.setProviderSetting(this.name, 'top_k', undefined);
|
747
300
|
}
|
748
301
|
else {
|
749
|
-
|
302
|
+
// Set each param individually
|
303
|
+
if ('temperature' in params) {
|
304
|
+
settingsService.setProviderSetting(this.name, 'temperature', params.temperature);
|
305
|
+
}
|
306
|
+
if ('max_tokens' in params) {
|
307
|
+
settingsService.setProviderSetting(this.name, 'max_tokens', params.max_tokens);
|
308
|
+
}
|
309
|
+
if ('top_p' in params) {
|
310
|
+
settingsService.setProviderSetting(this.name, 'top_p', params.top_p);
|
311
|
+
}
|
312
|
+
if ('top_k' in params) {
|
313
|
+
settingsService.setProviderSetting(this.name, 'top_k', params.top_k);
|
314
|
+
}
|
315
|
+
if ('stop_sequences' in params) {
|
316
|
+
settingsService.setProviderSetting(this.name, 'stop_sequences', params.stop_sequences);
|
317
|
+
}
|
750
318
|
}
|
751
319
|
}
|
752
320
|
/**
|
@@ -754,7 +322,24 @@ ${llxprtPrompts}`;
|
|
754
322
|
* @returns Current parameters or undefined if not set
|
755
323
|
*/
|
756
324
|
getModelParams() {
|
757
|
-
|
325
|
+
// Always get from SettingsService
|
326
|
+
const settingsService = getSettingsService();
|
327
|
+
const providerSettings = settingsService.getProviderSettings(this.name);
|
328
|
+
if (!providerSettings) {
|
329
|
+
return undefined;
|
330
|
+
}
|
331
|
+
const params = {};
|
332
|
+
if (providerSettings.temperature !== undefined)
|
333
|
+
params.temperature = providerSettings.temperature;
|
334
|
+
if (providerSettings.max_tokens !== undefined)
|
335
|
+
params.max_tokens = providerSettings.max_tokens;
|
336
|
+
if (providerSettings.top_p !== undefined)
|
337
|
+
params.top_p = providerSettings.top_p;
|
338
|
+
if (providerSettings.top_k !== undefined)
|
339
|
+
params.top_k = providerSettings.top_k;
|
340
|
+
if (providerSettings.stop_sequences !== undefined)
|
341
|
+
params.stop_sequences = providerSettings.stop_sequences;
|
342
|
+
return Object.keys(params).length > 0 ? params : undefined;
|
758
343
|
}
|
759
344
|
/**
|
760
345
|
* Override clearAuthCache to also clear cached auth key
|
@@ -770,5 +355,421 @@ ${llxprtPrompts}`;
|
|
770
355
|
// Delegates unchanged to the base provider's authentication check; kept as
// an explicit override point for Anthropic-specific auth logic.
async isAuthenticated() {
    return super.isAuthenticated();
}
|
358
|
+
/**
|
359
|
+
* Detect the appropriate tool format for the current model/configuration
|
360
|
+
* @returns The detected tool format
|
361
|
+
*/
|
362
|
+
detectToolFormat() {
|
363
|
+
try {
|
364
|
+
const settingsService = getSettingsService();
|
365
|
+
// First check SettingsService for toolFormat override in provider settings
|
366
|
+
// Note: This is synchronous access to cached settings, not async
|
367
|
+
const currentSettings = settingsService['settings'];
|
368
|
+
const providerSettings = currentSettings?.providers?.[this.name];
|
369
|
+
const toolFormatOverride = providerSettings?.toolFormat;
|
370
|
+
// If explicitly set to a specific format (not 'auto'), use it
|
371
|
+
if (toolFormatOverride && toolFormatOverride !== 'auto') {
|
372
|
+
return toolFormatOverride;
|
373
|
+
}
|
374
|
+
// Auto-detect based on model name if set to 'auto' or not set
|
375
|
+
const modelName = this.getCurrentModel().toLowerCase();
|
376
|
+
// Check for GLM-4.5 models (glm-4.5, glm-4-5)
|
377
|
+
if (modelName.includes('glm-4.5') || modelName.includes('glm-4-5')) {
|
378
|
+
return 'qwen';
|
379
|
+
}
|
380
|
+
// Check for qwen models
|
381
|
+
if (modelName.includes('qwen')) {
|
382
|
+
return 'qwen';
|
383
|
+
}
|
384
|
+
// Default to 'anthropic' format
|
385
|
+
return 'anthropic';
|
386
|
+
}
|
387
|
+
catch (error) {
|
388
|
+
this.logger.debug(() => `Failed to detect tool format from SettingsService: ${error}`);
|
389
|
+
// Fallback detection without SettingsService
|
390
|
+
const modelName = this.getCurrentModel().toLowerCase();
|
391
|
+
if (modelName.includes('glm-4.5') || modelName.includes('glm-4-5')) {
|
392
|
+
return 'qwen';
|
393
|
+
}
|
394
|
+
if (modelName.includes('qwen')) {
|
395
|
+
return 'qwen';
|
396
|
+
}
|
397
|
+
return 'anthropic';
|
398
|
+
}
|
399
|
+
}
|
400
|
+
/**
 * Tool format currently in effect for this provider.
 * @returns the format resolved by detectToolFormat()
 */
getToolFormat() {
    // Use the same detection logic as detectToolFormat()
    return this.detectToolFormat();
}
|
404
|
+
/**
|
405
|
+
* Normalize tool IDs from various formats to Anthropic format
|
406
|
+
* Handles IDs from OpenAI (call_xxx), Anthropic (toolu_xxx), and history (hist_tool_xxx)
|
407
|
+
*/
|
408
|
+
normalizeToAnthropicToolId(id) {
|
409
|
+
// If already in Anthropic format, return as-is
|
410
|
+
if (id.startsWith('toolu_')) {
|
411
|
+
return id;
|
412
|
+
}
|
413
|
+
// For history format, extract the UUID and add Anthropic prefix
|
414
|
+
if (id.startsWith('hist_tool_')) {
|
415
|
+
const uuid = id.substring('hist_tool_'.length);
|
416
|
+
return 'toolu_' + uuid;
|
417
|
+
}
|
418
|
+
// For OpenAI format, extract the UUID and add Anthropic prefix
|
419
|
+
if (id.startsWith('call_')) {
|
420
|
+
const uuid = id.substring('call_'.length);
|
421
|
+
return 'toolu_' + uuid;
|
422
|
+
}
|
423
|
+
// Unknown format - assume it's a raw UUID
|
424
|
+
return 'toolu_' + id;
|
425
|
+
}
|
426
|
+
/**
|
427
|
+
* Normalize tool IDs from Anthropic format to history format
|
428
|
+
*/
|
429
|
+
normalizeToHistoryToolId(id) {
|
430
|
+
// If already in history format, return as-is
|
431
|
+
if (id.startsWith('hist_tool_')) {
|
432
|
+
return id;
|
433
|
+
}
|
434
|
+
// For Anthropic format, extract the UUID and add history prefix
|
435
|
+
if (id.startsWith('toolu_')) {
|
436
|
+
const uuid = id.substring('toolu_'.length);
|
437
|
+
return 'hist_tool_' + uuid;
|
438
|
+
}
|
439
|
+
// For OpenAI format, extract the UUID and add history prefix
|
440
|
+
if (id.startsWith('call_')) {
|
441
|
+
const uuid = id.substring('call_'.length);
|
442
|
+
return 'hist_tool_' + uuid;
|
443
|
+
}
|
444
|
+
// Unknown format - assume it's a raw UUID
|
445
|
+
return 'hist_tool_' + id;
|
446
|
+
}
|
447
|
+
/**
 * Generate chat completion with IContent interface
 * Convert IContent directly to Anthropic API format
 *
 * Pipeline, as implemented below:
 *  1. Drop 'tool' items that appear before any other content.
 *  2. Convert IContent items into Anthropic messages, grouping consecutive
 *     tool results into a single user message.
 *  3. Drop tool_result blocks whose tool_use id never appears in an
 *     assistant message (Anthropic requires strict pairing).
 *  4. Ensure the conversation starts with a user message and is non-empty.
 *  5. Send the request and yield IContent chunks (streaming or single).
 *
 * @param content Conversation history items with speaker 'human'|'ai'|'tool'
 * @param tools Gemini-format tool declarations (converted via toolFormatter)
 * @throws Error if a 'tool' item lacks a tool_response block, or if an item
 *         has an unknown speaker value
 */
async *generateChatCompletion(content, tools) {
    // Convert IContent directly to Anthropic API format (no IMessage!)
    const anthropicMessages = [];
    // Extract system message if present
    // let systemMessage: string | undefined;
    // Filter out orphaned tool responses at the beginning of the conversation
    // TODO: Investigate post-0.2.2 - These shouldn't be truly orphaned since the same
    // history works with OpenAI/Cerebras. Likely Anthropic has stricter formatting
    // requirements for tool responses that we're not fully meeting yet.
    let startIndex = 0;
    while (startIndex < content.length &&
        content[startIndex].speaker === 'tool') {
        this.logger.debug(() => `Skipping orphaned tool response at beginning of conversation`);
        startIndex++;
    }
    const filteredContent = content.slice(startIndex);
    // Group consecutive tool responses together for Anthropic API
    let pendingToolResults = [];
    // Emit accumulated tool results as one user message, then reset.
    const flushToolResults = () => {
        if (pendingToolResults.length > 0) {
            anthropicMessages.push({
                role: 'user',
                content: pendingToolResults,
            });
            pendingToolResults = [];
        }
    };
    for (const c of filteredContent) {
        if (c.speaker === 'human') {
            // Flush any pending tool results before adding a human message
            flushToolResults();
            const textBlock = c.blocks.find((b) => b.type === 'text');
            // Add text block as user message
            anthropicMessages.push({
                role: 'user',
                content: textBlock?.text || '',
            });
        }
        else if (c.speaker === 'ai') {
            // Flush any pending tool results before adding an AI message
            flushToolResults();
            const textBlocks = c.blocks.filter((b) => b.type === 'text');
            const toolCallBlocks = c.blocks.filter((b) => b.type === 'tool_call');
            if (toolCallBlocks.length > 0) {
                // Build content array with text and tool_use blocks
                const contentArray = [];
                // Add text if present
                const contentText = textBlocks.map((b) => b.text).join('');
                if (contentText) {
                    contentArray.push({ type: 'text', text: contentText });
                }
                // Add tool uses
                for (const tc of toolCallBlocks) {
                    // Ensure parameters are an object, not a string
                    let parametersObj = tc.parameters;
                    if (typeof parametersObj === 'string') {
                        try {
                            parametersObj = JSON.parse(parametersObj);
                        }
                        catch (e) {
                            // Unparseable parameters degrade to an empty object
                            // rather than failing the whole request.
                            this.logger.debug(() => `Failed to parse tool parameters as JSON: ${e}`);
                            parametersObj = {};
                        }
                    }
                    contentArray.push({
                        type: 'tool_use',
                        id: this.normalizeToAnthropicToolId(tc.id),
                        name: tc.name,
                        input: parametersObj,
                    });
                }
                anthropicMessages.push({
                    role: 'assistant',
                    content: contentArray,
                });
            }
            else {
                // Text-only message
                const contentText = textBlocks.map((b) => b.text).join('');
                anthropicMessages.push({
                    role: 'assistant',
                    content: contentText,
                });
            }
        }
        else if (c.speaker === 'tool') {
            const toolResponseBlock = c.blocks.find((b) => b.type === 'tool_response');
            if (!toolResponseBlock) {
                throw new Error('Tool content must have a tool_response block');
            }
            // Collect tool results to be grouped together
            pendingToolResults.push({
                type: 'tool_result',
                tool_use_id: this.normalizeToAnthropicToolId(toolResponseBlock.callId),
                content: JSON.stringify(toolResponseBlock.result),
            });
        }
        else {
            throw new Error(`Unknown speaker type: ${c.speaker}`);
        }
    }
    // Flush any remaining tool results at the end
    flushToolResults();
    // Validate that all tool_results have corresponding tool_uses
    // Anthropic requires strict pairing between tool_use and tool_result
    const toolUseIds = new Set();
    const toolResultIds = new Set();
    for (const msg of anthropicMessages) {
        if (msg.role === 'assistant' && Array.isArray(msg.content)) {
            for (const block of msg.content) {
                if (block.type === 'tool_use') {
                    toolUseIds.add(block.id);
                }
            }
        }
        else if (msg.role === 'user' && Array.isArray(msg.content)) {
            for (const block of msg.content) {
                if (block.type === 'tool_result') {
                    toolResultIds.add(block.tool_use_id);
                }
            }
        }
    }
    // Remove orphaned tool results (results without corresponding tool uses)
    const orphanedResults = Array.from(toolResultIds).filter((id) => !toolUseIds.has(id));
    if (orphanedResults.length > 0) {
        this.logger.debug(() => `Found ${orphanedResults.length} orphaned tool results, removing them`);
        // Filter out messages that only contain orphaned tool results
        // NOTE: the callback also mutates msg.content in place for messages
        // that keep at least one non-orphaned block.
        const filteredMessages = anthropicMessages.filter((msg) => {
            if (msg.role === 'user' && Array.isArray(msg.content)) {
                const filteredContent = msg.content.filter((block) => block.type !== 'tool_result' ||
                    !orphanedResults.includes(block.tool_use_id));
                if (filteredContent.length === 0) {
                    // Remove empty user messages
                    return false;
                }
                msg.content = filteredContent;
            }
            return true;
        });
        // Replace the messages array
        anthropicMessages.length = 0;
        anthropicMessages.push(...filteredMessages);
    }
    // Ensure the conversation starts with a valid message type
    // Anthropic requires the first message to be from the user
    if (anthropicMessages.length > 0 && anthropicMessages[0].role !== 'user') {
        // If the first message is not from the user, add a minimal user message
        this.logger.debug(() => `First message is not from user, adding placeholder user message`);
        anthropicMessages.unshift({
            role: 'user',
            content: 'Continue the conversation',
        });
    }
    // Ensure we have at least one message
    if (anthropicMessages.length === 0) {
        anthropicMessages.push({
            role: 'user',
            content: 'Hello',
        });
    }
    // Convert Gemini format tools directly to Anthropic format using the new method
    const anthropicTools = this.toolFormatter.convertGeminiToAnthropic(tools);
    // Ensure authentication
    await this.updateClientWithResolvedAuth();
    // Check OAuth mode
    const authToken = await this.getAuthToken();
    const isOAuth = authToken && authToken.startsWith('sk-ant-oat');
    // Get streaming setting from ephemeral settings (default: enabled)
    const streamingSetting = this.providerConfig?.getEphemeralSettings?.()?.['streaming'];
    const streamingEnabled = streamingSetting !== 'disabled';
    // Build request with proper typing
    const currentModel = this.getCurrentModel();
    // Get the system prompt for non-OAuth mode
    const userMemory = this.globalConfig?.getUserMemory
        ? this.globalConfig.getUserMemory()
        : '';
    // For OAuth mode, inject core system prompt as the first human message
    if (isOAuth) {
        const corePrompt = await getCoreSystemPromptAsync(userMemory, currentModel, undefined);
        if (corePrompt) {
            anthropicMessages.unshift({
                role: 'user',
                content: `<system>\n${corePrompt}\n</system>\n\nUser provided conversation begins here:`,
            });
        }
    }
    const systemPrompt = !isOAuth
        ? await getCoreSystemPromptAsync(userMemory, currentModel, undefined)
        : undefined;
    // Spread order matters: getModelParams() may override max_tokens/stream
    // defaults; OAuth mode pins the Claude Code system string instead of the
    // computed system prompt.
    const requestBody = {
        model: currentModel,
        messages: anthropicMessages,
        max_tokens: this.getMaxTokensForModel(currentModel),
        stream: streamingEnabled,
        ...(this.getModelParams() || {}),
        ...(isOAuth
            ? {
                system: "You are Claude Code, Anthropic's official CLI for Claude.",
            }
            : systemPrompt
                ? { system: systemPrompt }
                : {}),
        ...(anthropicTools && anthropicTools.length > 0
            ? { tools: anthropicTools }
            : {}),
    };
    // Debug log the tools being sent to Anthropic
    if (anthropicTools && anthropicTools.length > 0) {
        this.logger.debug(() => `[AnthropicProvider] Sending tools to API:`, {
            toolCount: anthropicTools.length,
            toolNames: anthropicTools.map((t) => t.name),
            firstTool: anthropicTools[0],
            requestHasTools: 'tools' in requestBody,
        });
    }
    // Make the API call directly with type assertion
    const response = await this.anthropic.messages.create(requestBody);
    if (streamingEnabled) {
        // Handle streaming response - response is already a Stream when streaming is enabled
        const stream = response;
        // Accumulator for the tool call currently being streamed; its JSON
        // input arrives in fragments across input_json_delta chunks.
        let currentToolCall;
        for await (const chunk of stream) {
            if (chunk.type === 'content_block_start') {
                if (chunk.content_block.type === 'tool_use') {
                    currentToolCall = {
                        id: chunk.content_block.id,
                        name: chunk.content_block.name,
                        input: '',
                    };
                }
            }
            else if (chunk.type === 'content_block_delta') {
                if (chunk.delta.type === 'text_delta') {
                    // Emit text immediately as IContent
                    yield {
                        speaker: 'ai',
                        blocks: [{ type: 'text', text: chunk.delta.text }],
                    };
                }
                else if (chunk.delta.type === 'input_json_delta' &&
                    currentToolCall) {
                    currentToolCall.input += chunk.delta.partial_json;
                    // Check for double-escaping patterns
                    const detectedFormat = this.detectToolFormat();
                    logDoubleEscapingInChunk(chunk.delta.partial_json, currentToolCall.name, detectedFormat);
                }
            }
            else if (chunk.type === 'content_block_stop') {
                if (currentToolCall) {
                    // Process tool parameters with double-escape handling
                    const detectedFormat = this.detectToolFormat();
                    const processedParameters = processToolParameters(currentToolCall.input, currentToolCall.name, detectedFormat);
                    yield {
                        speaker: 'ai',
                        blocks: [
                            {
                                type: 'tool_call',
                                id: this.normalizeToHistoryToolId(currentToolCall.id),
                                name: currentToolCall.name,
                                parameters: processedParameters,
                            },
                        ],
                    };
                    currentToolCall = undefined;
                }
            }
            else if (chunk.type === 'message_delta' && chunk.usage) {
                // Emit usage metadata
                yield {
                    speaker: 'ai',
                    blocks: [],
                    metadata: {
                        usage: {
                            promptTokens: chunk.usage.input_tokens || 0,
                            completionTokens: chunk.usage.output_tokens || 0,
                            totalTokens: (chunk.usage.input_tokens || 0) +
                                (chunk.usage.output_tokens || 0),
                        },
                    },
                };
            }
        }
    }
    else {
        // Handle non-streaming response
        const message = response;
        const blocks = [];
        // Process content blocks
        const detectedFormat = this.detectToolFormat();
        for (const contentBlock of message.content) {
            if (contentBlock.type === 'text') {
                blocks.push({ type: 'text', text: contentBlock.text });
            }
            else if (contentBlock.type === 'tool_use') {
                // Process tool parameters with double-escape handling
                const processedParameters = processToolParameters(JSON.stringify(contentBlock.input), contentBlock.name, detectedFormat);
                blocks.push({
                    type: 'tool_call',
                    id: this.normalizeToHistoryToolId(contentBlock.id),
                    name: contentBlock.name,
                    parameters: processedParameters,
                });
            }
        }
        // Build response IContent
        const result = {
            speaker: 'ai',
            blocks,
        };
        // Add usage metadata if present
        if (message.usage) {
            result.metadata = {
                usage: {
                    promptTokens: message.usage.input_tokens,
                    completionTokens: message.usage.output_tokens,
                    totalTokens: message.usage.input_tokens + message.usage.output_tokens,
                },
            };
        }
        yield result;
    }
}
|
773
774
|
}
|
774
775
|
//# sourceMappingURL=AnthropicProvider.js.map
|