converse-mcp-server 2.3.1 → 2.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +771 -738
- package/docs/API.md +10 -1
- package/docs/PROVIDERS.md +8 -4
- package/package.json +12 -12
- package/src/async/asyncJobStore.js +82 -52
- package/src/async/eventBus.js +25 -20
- package/src/async/fileCache.js +121 -40
- package/src/async/jobRunner.js +65 -39
- package/src/async/providerStreamNormalizer.js +203 -117
- package/src/config.js +374 -102
- package/src/continuationStore.js +32 -24
- package/src/index.js +45 -25
- package/src/prompts/helpPrompt.js +328 -305
- package/src/providers/anthropic.js +303 -119
- package/src/providers/codex.js +103 -45
- package/src/providers/deepseek.js +24 -8
- package/src/providers/google.js +337 -93
- package/src/providers/index.js +1 -1
- package/src/providers/interface.js +16 -11
- package/src/providers/mistral.js +179 -69
- package/src/providers/openai-compatible.js +231 -94
- package/src/providers/openai.js +1094 -914
- package/src/providers/openrouter-endpoints-client.js +220 -216
- package/src/providers/openrouter.js +426 -381
- package/src/providers/xai.js +153 -56
- package/src/resources/helpResource.js +70 -67
- package/src/router.js +95 -67
- package/src/services/summarizationService.js +51 -24
- package/src/systemPrompts.js +89 -89
- package/src/tools/cancelJob.js +31 -19
- package/src/tools/chat.js +997 -883
- package/src/tools/checkStatus.js +86 -65
- package/src/tools/consensus.js +400 -234
- package/src/tools/index.js +39 -16
- package/src/transport/httpTransport.js +82 -55
- package/src/utils/contextProcessor.js +54 -37
- package/src/utils/errorHandler.js +95 -45
- package/src/utils/fileValidator.js +107 -98
- package/src/utils/formatStatus.js +122 -64
- package/src/utils/logger.js +459 -449
- package/src/utils/pathUtils.js +2 -2
- package/src/utils/tokenLimiter.js +216 -216
The remainder of this diff covers package/src/providers/openai-compatible.js (the `createOpenAICompatibleProvider` module). The changes are almost entirely mechanical reformatting: trailing commas added and long statements wrapped across lines, consistent with a Prettier-style pass. Removed lines are shown as the diff viewer captured them; where the old content was truncated or lost, only a bare `-` marker (or the line's leading fragment) remains.

@@ -29,25 +29,25 @@ import { ProviderError, ErrorCodes, StopReasons } from './interface.js';
  */
 const STOP_REASON_MAP = {
   // Standard OpenAI reasons
-
-
-
-
-
-
+  stop: StopReasons.STOP,
+  length: StopReasons.LENGTH,
+  max_tokens: StopReasons.LENGTH,
+  tool_calls: StopReasons.TOOL_USE,
+  function_call: StopReasons.TOOL_USE,
+  content_filter: StopReasons.CONTENT_FILTER,

   // Provider-specific variations
-
-
-
-
-
-
-
+  finish: StopReasons.STOP,
+  complete: StopReasons.STOP,
+  completed: StopReasons.STOP,
+  token_limit: StopReasons.LENGTH,
+  token_limit_reached: StopReasons.LENGTH,
+  safety: StopReasons.SAFETY,
+  filtered: StopReasons.CONTENT_FILTER,

   // Default
-
-
+  null: StopReasons.STOP,
+  undefined: StopReasons.STOP,
 };

 /**
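The expanded map turns stop-reason normalization into a plain table lookup. Below is a minimal sketch of how such a table is typically consumed; the inline `StopReasons` values and the `normalizeStopReason` helper are illustrative stand-ins, not the package's actual exports:

```js
// Illustrative stand-in for the StopReasons enum imported from './interface.js'.
const StopReasons = {
  STOP: 'stop',
  LENGTH: 'length',
  TOOL_USE: 'tool_use',
  SAFETY: 'safety',
  CONTENT_FILTER: 'content_filter',
};

// Abbreviated version of the map shown in the hunk above.
const STOP_REASON_MAP = {
  stop: StopReasons.STOP,
  max_tokens: StopReasons.LENGTH,
  token_limit_reached: StopReasons.LENGTH,
  filtered: StopReasons.CONTENT_FILTER,
};

// Hypothetical helper: fold any provider-specific finish reason into the
// normalized set, treating unknown or missing values as a normal stop.
function normalizeStopReason(raw) {
  return STOP_REASON_MAP[raw] ?? StopReasons.STOP;
}

console.log(normalizeStopReason('token_limit_reached')); // 'length'
console.log(normalizeStopReason('some_new_reason')); // 'stop'
```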
@@ -71,22 +71,34 @@ function defaultValidateApiKey(apiKey) {
  */
 function convertMessages(messages, providerName) {
   if (!Array.isArray(messages)) {
-    throw new ProviderError(
+    throw new ProviderError(
+      'Messages must be an array',
+      ErrorCodes.INVALID_MESSAGES,
+    );
   }

   return messages.map((msg, index) => {
     if (!msg || typeof msg !== 'object') {
-      throw new ProviderError(
+      throw new ProviderError(
+        `Message at index ${index} must be an object`,
+        ErrorCodes.INVALID_MESSAGE,
+      );
     }

     const { role, content } = msg;

     if (!role || !['system', 'user', 'assistant'].includes(role)) {
-      throw new ProviderError(
+      throw new ProviderError(
+        `Invalid role "${role}" at message index ${index}`,
+        ErrorCodes.INVALID_ROLE,
+      );
     }

     if (!content) {
-      throw new ProviderError(
+      throw new ProviderError(
+        `Message content is required at index ${index}`,
+        ErrorCodes.MISSING_CONTENT,
+      );
     }

     // Handle complex content structure (array with text and images)
@@ -97,7 +109,7 @@ function convertMessages(messages, providerName) {
         if (item.type === 'text') {
           convertedContent.push({
             type: 'text',
-            text: item.text
+            text: item.text,
           });
         } else if (item.type === 'image' && item.source) {
           // Convert Anthropic/Claude format to OpenAI format
@@ -105,10 +117,12 @@ function convertMessages(messages, providerName) {
             type: 'image_url',
             image_url: {
               url: `data:${item.source.media_type};base64,${item.source.data}`,
-              detail: 'auto'
-            }
+              detail: 'auto',
+            },
           });
-          debugLog(
+          debugLog(
+            `[${providerName}] Converting image: ${item.source.media_type}, data length: ${item.source.data.length}`,
+          );
         }
       }

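The hunk above converts an Anthropic/Claude-style base64 image block into the OpenAI `image_url` data-URI form. A self-contained sketch of that transformation (the sample values are illustrative):

```js
// Anthropic/Claude-style image block, the input shape convertMessages expects.
const item = {
  type: 'image',
  source: { media_type: 'image/png', data: 'iVBORw0KGgoAAAANSUhEUg...' }, // truncated base64
};

// OpenAI-compatible equivalent: the binary payload becomes a data URI.
const converted = {
  type: 'image_url',
  image_url: {
    url: `data:${item.source.media_type};base64,${item.source.data}`,
    detail: 'auto', // let the API pick the resolution tier
  },
};

console.log(converted.image_url.url.slice(0, 22)); // 'data:image/png;base64,'
```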
@@ -154,33 +168,90 @@ function resolveModelName(modelName, supportedModels) {
 function handleApiError(error, providerName, resolvedModel) {
   // Extract error details from different error formats
   const status = error.response?.status || error.status;
-  const errorMessage =
+  const errorMessage =
+    error.response?.data?.error?.message || error.message || 'Unknown error';
   const errorCode = error.response?.data?.error?.code || error.code;

   // Map common error codes and status codes
-  if (
-
-
-
-
-    throw new ProviderError(
-
-
-
-
-  } else if (
-
+  if (
+    status === 401 ||
+    errorCode === 'invalid_api_key' ||
+    errorMessage?.includes('Invalid API key')
+  ) {
+    throw new ProviderError(
+      `Invalid ${providerName} API key`,
+      ErrorCodes.INVALID_API_KEY,
+      error,
+    );
+  } else if (
+    status === 429 ||
+    error.type === 'rate_limit_error' ||
+    errorCode === 'rate_limit_exceeded' ||
+    errorMessage?.includes('Rate limit exceeded')
+  ) {
+    throw new ProviderError(
+      `${providerName} rate limit exceeded`,
+      ErrorCodes.RATE_LIMIT_EXCEEDED,
+      error,
+    );
+  } else if (
+    status === 403 ||
+    errorCode === 'insufficient_quota' ||
+    errorMessage?.includes('quota exceeded')
+  ) {
+    throw new ProviderError(
+      `${providerName} API quota exceeded`,
+      ErrorCodes.QUOTA_EXCEEDED,
+      error,
+    );
+  } else if (
+    status === 404 ||
+    errorCode === 'model_not_found' ||
+    (errorMessage?.includes('Model') && errorMessage?.includes('not found'))
+  ) {
+    throw new ProviderError(
+      `Model ${resolvedModel} not found`,
+      ErrorCodes.MODEL_NOT_FOUND,
+      error,
+    );
+  } else if (
+    status === 400 &&
+    (errorMessage?.includes('Context length exceeded') ||
+      errorMessage?.includes('context'))
+  ) {
+    throw new ProviderError(
+      'Context length exceeded for model',
+      ErrorCodes.CONTEXT_LENGTH_EXCEEDED,
+      error,
+    );
+  } else if (
+    error.type === 'invalid_request_error' ||
+    (status === 400 && !errorMessage?.includes('context'))
+  ) {
+    throw new ProviderError(
+      `Invalid request: ${errorMessage}`,
+      ErrorCodes.INVALID_REQUEST,
+      error,
+    );
   } else if (error.code === 'ETIMEDOUT' || error.code === 'ECONNABORTED') {
-    throw new ProviderError(
+    throw new ProviderError(
+      `${providerName} request timeout`,
+      ErrorCodes.TIMEOUT_ERROR,
+      error,
+    );
   } else if (error.code?.startsWith('E') || errorMessage?.includes('network')) {
-    throw new ProviderError(
+    throw new ProviderError(
+      `${providerName} network error: ${errorMessage}`,
+      ErrorCodes.NETWORK_ERROR,
+      error,
+    );
   }

   // Generic error
   throw new ProviderError(
     `${providerName} API error: ${error.message || 'Unknown error'}`,
     ErrorCodes.API_ERROR,
-    error
+    error,
   );
 }

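Every branch rethrows a `ProviderError` carrying a stable machine-readable code, so callers can branch on `err.code` instead of parsing provider-specific message strings. A sketch under the assumption that `ErrorCodes` values are plain strings; `provider.chat` stands in for any call that funnels failures through `handleApiError`:

```js
// Assumed string values; the real constants live in './interface.js'.
const ErrorCodes = {
  RATE_LIMIT_EXCEEDED: 'RATE_LIMIT_EXCEEDED',
  CONTEXT_LENGTH_EXCEEDED: 'CONTEXT_LENGTH_EXCEEDED',
};

async function callProvider(provider, options) {
  try {
    return await provider.chat(options); // any call routed through handleApiError
  } catch (err) {
    if (err.code === ErrorCodes.RATE_LIMIT_EXCEEDED) {
      return { retry: true, reason: err.message }; // back off and try again
    }
    if (err.code === ErrorCodes.CONTEXT_LENGTH_EXCEEDED) {
      return { retry: false, reason: err.message }; // shrink the prompt first
    }
    throw err; // everything else bubbles up
  }
}
```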
@@ -199,7 +270,7 @@ export function createOpenAICompatibleProvider(providerConfig) {
     validateApiKey = defaultValidateApiKey,
     transformRequest,
     transformResponse,
-    defaultParams = {}
+    defaultParams = {},
   } = providerConfig;

   // Create custom error class for this provider
@@ -230,15 +301,22 @@ export function createOpenAICompatibleProvider(providerConfig) {
       } = options;

       // Get API key from config or use provider default
-      const effectiveApiKey =
+      const effectiveApiKey =
+        config?.apiKeys?.[providerName.toLowerCase()] || apiKey;

       // Validate API key
       if (!effectiveApiKey) {
-        throw new CustomProviderError(
+        throw new CustomProviderError(
+          `${providerName} API key not configured`,
+          ErrorCodes.MISSING_API_KEY,
+        );
       }

       if (!validateApiKey(effectiveApiKey)) {
-        throw new CustomProviderError(
+        throw new CustomProviderError(
+          `Invalid ${providerName} API key format`,
+          ErrorCodes.INVALID_API_KEY,
+        );
       }

       // Initialize OpenAI client with custom configuration
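Key resolution is config-first: a per-provider entry in `config.apiKeys` (keyed by the lowercased provider name) overrides the key the provider was created with. A small illustration with made-up values:

```js
const providerName = 'OpenAI';
const apiKey = 'sk-default-from-env'; // key baked into the provider definition
const config = { apiKeys: { openai: 'sk-override-from-config' } };

// Same lookup as in the hunk above: config wins, provider default is the fallback.
const effectiveApiKey = config?.apiKeys?.[providerName.toLowerCase()] || apiKey;
console.log(effectiveApiKey); // 'sk-override-from-config'
```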
@@ -248,8 +326,8 @@ export function createOpenAICompatibleProvider(providerConfig) {
         defaultHeaders: {
           ...customHeaders,
           // Support dynamic headers from provider config
-          ...(config?.providers?._customHeaders || {})
-        }
+          ...(config?.providers?._customHeaders || {}),
+        },
       };

       // Add timeout if specified in model config
@@ -265,15 +343,16 @@ export function createOpenAICompatibleProvider(providerConfig) {
       const openaiMessages = convertMessages(messages, providerName);

       // Check if messages contain images and if model supports them
-      const hasImages = messages.some(
-
-
+      const hasImages = messages.some(
+        (msg) =>
+          Array.isArray(msg.content) &&
+          msg.content.some((item) => item.type === 'image'),
       );

       if (hasImages && modelConfig.supportsImages === false) {
         throw new CustomProviderError(
           `Model ${resolvedModel} does not support images`,
-          ErrorCodes.INVALID_REQUEST
+          ErrorCodes.INVALID_REQUEST,
         );
       }

@@ -283,17 +362,24 @@ export function createOpenAICompatibleProvider(providerConfig) {
         messages: openaiMessages,
         stream,
         ...defaultParams,
-        ...otherOptions
+        ...otherOptions,
       };

       // Add temperature if model supports it and not already set by defaultParams
-      if (
+      if (
+        modelConfig.supportsTemperature !== false &&
+        temperature !== undefined &&
+        !defaultParams.temperature
+      ) {
         requestPayload.temperature = Math.max(0, Math.min(2, temperature));
       }

       // Add max tokens if specified
       if (maxTokens) {
-        requestPayload.max_tokens = Math.min(
+        requestPayload.max_tokens = Math.min(
+          maxTokens,
+          modelConfig.maxOutputTokens || 100000,
+        );
       }

       // Add usage reporting for streaming mode
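The `Math.max(0, Math.min(2, temperature))` expression clamps the caller's value into the 0 to 2 range that OpenAI-compatible chat APIs accept, and `max_tokens` is capped the same way against the model's configured output ceiling. A quick illustration:

```js
const clampTemperature = (t) => Math.max(0, Math.min(2, t));

console.log(clampTemperature(3.5)); // 2   (too hot: capped)
console.log(clampTemperature(-0.3)); // 0  (negative: floored)
console.log(clampTemperature(0.7)); // 0.7 (in range: unchanged)

// max_tokens follows the same pattern against the model's output limit.
console.log(Math.min(128000, 4096 /* e.g. modelConfig.maxOutputTokens */)); // 4096
```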
@@ -306,16 +392,27 @@ export function createOpenAICompatibleProvider(providerConfig) {

       // Apply custom request transformation if provided
       if (transformRequest) {
-        requestPayload = await transformRequest(requestPayload, {
+        requestPayload = await transformRequest(requestPayload, {
+          model: resolvedModel,
+          modelConfig,
+        });
       }

       // Handle streaming requests
       if (stream && requestPayload.stream !== false) {
-        return this._createStreamingGenerator(
+        return this._createStreamingGenerator(
+          openai,
+          requestPayload,
+          resolvedModel,
+          modelConfig,
+          signal,
+        );
       }

       try {
-        debugLog(
+        debugLog(
+          `[${providerName}] Calling ${resolvedModel} with ${openaiMessages.length} messages`,
+        );

         // Check if already aborted before making request
         if (signal?.aborted) {
@@ -329,7 +426,8 @@ export function createOpenAICompatibleProvider(providerConfig) {
         if (signal) {
           requestWithSignal.signal = signal;
         }
-        const response =
+        const response =
+          await openai.chat.completions.create(requestWithSignal);

         const responseTime = Date.now() - startTime;
         debugLog(`[${providerName}] Response received in ${responseTime}ms`);
@@ -337,12 +435,18 @@ export function createOpenAICompatibleProvider(providerConfig) {
         // Extract response data
         const choice = response.choices?.[0];
         if (!choice) {
-          throw new CustomProviderError(
+          throw new CustomProviderError(
+            'No response choice received',
+            ErrorCodes.NO_RESPONSE_CHOICE,
+          );
         }

         const content = choice.message?.content;
         if (!content) {
-          throw new CustomProviderError(
+          throw new CustomProviderError(
+            'No content in response',
+            ErrorCodes.NO_RESPONSE_CONTENT,
+          );
         }

         // Extract and normalize finish reason
@@ -361,13 +465,14 @@ export function createOpenAICompatibleProvider(providerConfig) {
           model: response.model || resolvedModel,
           usage: {
             input_tokens: usage.prompt_tokens || usage.input_tokens || 0,
-            output_tokens:
-
+            output_tokens:
+              usage.completion_tokens || usage.output_tokens || 0,
+            total_tokens: usage.total_tokens || 0,
           },
           response_time_ms: responseTime,
           finish_reason: finishReason,
-          provider: providerName.toLowerCase()
-        }
+          provider: providerName.toLowerCase(),
+        },
       };

       // Apply custom response transformation if provided
@@ -376,7 +481,6 @@ export function createOpenAICompatibleProvider(providerConfig) {
         }

         return result;
-
       } catch (error) {
         debugError(`[${providerName}] Error during API call:`, error);

@@ -398,8 +502,16 @@ export function createOpenAICompatibleProvider(providerConfig) {
      * @param {Object} modelConfig - Model configuration
      * @returns {AsyncGenerator} - Streaming generator yielding events
      */
-    async *_createStreamingGenerator(
-
+    async *_createStreamingGenerator(
+      openai,
+      requestPayload,
+      resolvedModel,
+      modelConfig,
+      signal,
+    ) {
+      debugLog(
+        `[${providerName}] Starting streaming for ${resolvedModel} with ${requestPayload.messages?.length} messages`,
+      );

       const startTime = Date.now();
       let totalContent = '';
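The generator yields typed events: `start`, then `delta` (and optionally `thinking`) chunks, a `usage` event, and a final event carrying the accumulated content and metadata, with `error` events interleaved when chunk processing fails. A hedged sketch of a consumer; the wiring around obtaining `events` is illustrative:

```js
// `events` is assumed to be the async generator returned for a streaming
// request (e.g. by calling the provider's chat method with stream: true).
async function collectStream(events) {
  let text = '';
  for await (const event of events) {
    switch (event.type) {
      case 'delta':
        text += event.content; // incremental assistant text
        break;
      case 'thinking':
        // reasoning tokens; often rendered separately from the answer
        break;
      case 'usage':
        console.log('total tokens:', event.usage.total_tokens);
        break;
      case 'error':
        // recoverable chunk errors keep the stream alive; others stop here
        if (!event.error.recoverable) throw new Error(event.error.message);
        break;
    }
  }
  return text;
}
```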
@@ -418,7 +530,7 @@ export function createOpenAICompatibleProvider(providerConfig) {
       type: 'start',
       timestamp: new Date().toISOString(),
       model: resolvedModel,
-      provider: providerName.toLowerCase()
+      provider: providerName.toLowerCase(),
     };

     // Create streaming request with abort signal support
@@ -433,7 +545,9 @@ export function createOpenAICompatibleProvider(providerConfig) {
         try {
           // Check for cancellation during stream processing
           if (signal?.aborted) {
-            debugLog(
+            debugLog(
+              `[${providerName}] Stream aborted during processing: ${signal.reason || 'Cancelled'}`,
+            );
             break;
           }
           const choice = chunk.choices?.[0];
@@ -446,7 +560,7 @@ export function createOpenAICompatibleProvider(providerConfig) {
             yield {
               type: 'delta',
               content,
-              timestamp: new Date().toISOString()
+              timestamp: new Date().toISOString(),
             };
           }

@@ -455,7 +569,7 @@ export function createOpenAICompatibleProvider(providerConfig) {
             yield {
               type: 'thinking',
               content: choice.delta.reasoning,
-              timestamp: new Date().toISOString()
+              timestamp: new Date().toISOString(),
             };
           }

@@ -474,15 +588,18 @@ export function createOpenAICompatibleProvider(providerConfig) {
             finalModel = chunk.model;
           }
         } catch (chunkError) {
-          debugError(
+          debugError(
+            `[${providerName}] Error processing stream chunk:`,
+            chunkError,
+          );
           yield {
             type: 'error',
             error: {
               message: `Chunk processing error: ${chunkError.message}`,
               code: 'CHUNK_PROCESSING_ERROR',
-              recoverable: true
+              recoverable: true,
             },
-            timestamp: new Date().toISOString()
+            timestamp: new Date().toISOString(),
           };
         }
       }
@@ -495,11 +612,13 @@ export function createOpenAICompatibleProvider(providerConfig) {
         yield {
           type: 'usage',
           usage: {
-            input_tokens:
-
-
+            input_tokens:
+              lastUsage.prompt_tokens || lastUsage.input_tokens || 0,
+            output_tokens:
+              lastUsage.completion_tokens || lastUsage.output_tokens || 0,
+            total_tokens: lastUsage.total_tokens || 0,
           },
-          timestamp: new Date().toISOString()
+          timestamp: new Date().toISOString(),
         };
       }

@@ -510,21 +629,23 @@ export function createOpenAICompatibleProvider(providerConfig) {
       metadata: {
         model: finalModel,
         usage: {
-          input_tokens:
-
-
+          input_tokens:
+            lastUsage?.prompt_tokens || lastUsage?.input_tokens || 0,
+          output_tokens:
+            lastUsage?.completion_tokens || lastUsage?.output_tokens || 0,
+          total_tokens: lastUsage?.total_tokens || 0,
         },
         response_time_ms: responseTime,
         finish_reason: finishReason || 'stop',
-        provider: providerName.toLowerCase()
-      }
+        provider: providerName.toLowerCase(),
+      },
     };

     if (transformResponse) {
       const mockRawResponse = {
         choices: [{ finish_reason: finishReason }],
         usage: lastUsage,
-        model: finalModel
+        model: finalModel,
       };
       finalResult = await transformResponse(finalResult, mockRawResponse);
     }
@@ -535,9 +656,8 @@ export function createOpenAICompatibleProvider(providerConfig) {
       content: totalContent,
       stop_reason: finalResult.stop_reason,
       metadata: finalResult.metadata,
-      timestamp: new Date().toISOString()
+      timestamp: new Date().toISOString(),
     };
-
   } catch (error) {
     debugError(`[${providerName}] Streaming error:`, error);

@@ -550,10 +670,14 @@ export function createOpenAICompatibleProvider(providerConfig) {
       error: {
         message: handledError.message,
         code: handledError.code || 'STREAMING_ERROR',
-        recoverable: [
-
+        recoverable: [
+          ErrorCodes.RATE_LIMIT_EXCEEDED,
+          ErrorCodes.TIMEOUT_ERROR,
+          ErrorCodes.NETWORK_ERROR,
+        ].includes(handledError.code),
+        originalError: error,
       },
-      timestamp: new Date().toISOString()
+      timestamp: new Date().toISOString(),
     };

     // Re-throw to maintain existing error handling behavior
@@ -566,7 +690,8 @@ export function createOpenAICompatibleProvider(providerConfig) {
     * Validate configuration
     */
    validateConfig(config) {
-      const effectiveApiKey =
+      const effectiveApiKey =
+        config?.apiKeys?.[providerName.toLowerCase()] || apiKey;
      return !!(effectiveApiKey && validateApiKey(effectiveApiKey));
    },

@@ -590,14 +715,18 @@ export function createOpenAICompatibleProvider(providerConfig) {
    getModelConfig(modelName) {
      const resolved = resolveModelName(modelName, supportedModels);
      return supportedModels[resolved] || null;
-    }
+    },
   };
 }

 /**
  * Retry helper for rate-limited requests
  */
-export async function retryWithBackoff(
-
+export async function retryWithBackoff(
+  fn,
+  maxRetries = 3,
+  initialDelay = 1000,
+) {
   let lastError;

   for (let attempt = 0; attempt < maxRetries; attempt++) {
@@ -607,19 +736,27 @@ export async function retryWithBackoff(fn, maxRetries = 3, initialDelay = 1000)
       lastError = error;

       // Don't retry on non-retryable errors
-      if (
+      if (
+        error.code &&
+        ![
+          ErrorCodes.RATE_LIMIT_EXCEEDED,
+          ErrorCodes.TIMEOUT_ERROR,
+          ErrorCodes.NETWORK_ERROR,
+        ].includes(error.code)
+      ) {
        throw error;
      }

      // Wait before retrying
      if (attempt < maxRetries - 1) {
        const delay = initialDelay * Math.pow(2, attempt);
-        debugLog(
-
+        debugLog(
+          `Retrying after ${delay}ms (attempt ${attempt + 1}/${maxRetries})`,
+        );
+        await new Promise((resolve) => setTimeout(resolve, delay));
      }
    }
  }

  throw lastError;
 }
-
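`retryWithBackoff` retries only errors whose code marks them as rate-limit, timeout, or network failures (errors without a code are also retried), doubling the delay before each subsequent attempt. A usage sketch; the wrapped call and the import path are illustrative:

```js
import { retryWithBackoff } from './providers/openai-compatible.js';

// Hypothetical flaky call. With maxRetries = 3 the helper makes up to three
// attempts, waiting 1000 ms after the first failure and 2000 ms after the second.
const result = await retryWithBackoff(
  () => fetch('https://api.example.com/v1/chat').then((res) => res.json()),
  3, // maxRetries
  1000, // initialDelay in ms; doubles on each subsequent retry
);
```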