@llumiverse/drivers 1.0.0-dev.20260224.234313Z → 1.0.0
- package/lib/cjs/bedrock/converse.js +86 -12
- package/lib/cjs/bedrock/converse.js.map +1 -1
- package/lib/cjs/bedrock/index.js +208 -1
- package/lib/cjs/bedrock/index.js.map +1 -1
- package/lib/cjs/groq/index.js +7 -4
- package/lib/cjs/groq/index.js.map +1 -1
- package/lib/cjs/openai/index.js +457 -26
- package/lib/cjs/openai/index.js.map +1 -1
- package/lib/cjs/openai/openai_compatible.js +1 -0
- package/lib/cjs/openai/openai_compatible.js.map +1 -1
- package/lib/cjs/vertexai/index.js +42 -0
- package/lib/cjs/vertexai/index.js.map +1 -1
- package/lib/cjs/vertexai/models/claude.js +230 -2
- package/lib/cjs/vertexai/models/claude.js.map +1 -1
- package/lib/cjs/vertexai/models/gemini.js +261 -41
- package/lib/cjs/vertexai/models/gemini.js.map +1 -1
- package/lib/cjs/vertexai/models.js +1 -1
- package/lib/cjs/vertexai/models.js.map +1 -1
- package/lib/esm/bedrock/converse.js +80 -6
- package/lib/esm/bedrock/converse.js.map +1 -1
- package/lib/esm/bedrock/index.js +207 -2
- package/lib/esm/bedrock/index.js.map +1 -1
- package/lib/esm/groq/index.js +7 -4
- package/lib/esm/groq/index.js.map +1 -1
- package/lib/esm/openai/index.js +456 -27
- package/lib/esm/openai/index.js.map +1 -1
- package/lib/esm/openai/openai_compatible.js +1 -0
- package/lib/esm/openai/openai_compatible.js.map +1 -1
- package/lib/esm/vertexai/index.js +43 -1
- package/lib/esm/vertexai/index.js.map +1 -1
- package/lib/esm/vertexai/models/claude.js +229 -3
- package/lib/esm/vertexai/models/claude.js.map +1 -1
- package/lib/esm/vertexai/models/gemini.js +262 -43
- package/lib/esm/vertexai/models/gemini.js.map +1 -1
- package/lib/esm/vertexai/models.js +1 -1
- package/lib/esm/vertexai/models.js.map +1 -1
- package/lib/types/bedrock/converse.d.ts +1 -2
- package/lib/types/bedrock/converse.d.ts.map +1 -1
- package/lib/types/bedrock/index.d.ts +53 -1
- package/lib/types/bedrock/index.d.ts.map +1 -1
- package/lib/types/openai/index.d.ts +96 -1
- package/lib/types/openai/index.d.ts.map +1 -1
- package/lib/types/openai/openai_compatible.d.ts +5 -0
- package/lib/types/openai/openai_compatible.d.ts.map +1 -1
- package/lib/types/openai/openai_format.d.ts +1 -1
- package/lib/types/vertexai/index.d.ts +11 -1
- package/lib/types/vertexai/index.d.ts.map +1 -1
- package/lib/types/vertexai/models/claude.d.ts +64 -1
- package/lib/types/vertexai/models/claude.d.ts.map +1 -1
- package/lib/types/vertexai/models/gemini.d.ts +61 -1
- package/lib/types/vertexai/models/gemini.d.ts.map +1 -1
- package/lib/types/vertexai/models.d.ts +6 -1
- package/lib/types/vertexai/models.d.ts.map +1 -1
- package/package.json +9 -9
- package/src/bedrock/converse.ts +85 -10
- package/src/bedrock/error-handling.test.ts +352 -0
- package/src/bedrock/index.ts +225 -1
- package/src/groq/index.ts +9 -4
- package/src/openai/error-handling.test.ts +567 -0
- package/src/openai/index.ts +505 -29
- package/src/openai/openai_compatible.ts +7 -0
- package/src/openai/openai_format.ts +1 -1
- package/src/vertexai/index.ts +56 -5
- package/src/vertexai/models/claude-error-handling.test.ts +432 -0
- package/src/vertexai/models/claude.ts +273 -7
- package/src/vertexai/models/gemini-error-handling.test.ts +353 -0
- package/src/vertexai/models/gemini.ts +304 -48
- package/src/vertexai/models.ts +7 -2
package/lib/esm/openai/index.js
CHANGED
@@ -1,4 +1,5 @@
-import { AbstractDriver, ModelType, TrainingJobStatus, getConversationMeta, getModelCapabilities, incrementConversationTurn, modelModalitiesToArray, stripBase64ImagesFromConversation, supportsToolUse, truncateLargeTextInConversation, unwrapConversationArray, } from "@llumiverse/core";
+import { AbstractDriver, LlumiverseError, ModelType, TrainingJobStatus, getConversationMeta, getModelCapabilities, incrementConversationTurn, modelModalitiesToArray, stripBase64ImagesFromConversation, stripHeartbeatsFromConversation, supportsToolUse, truncateLargeTextInConversation, unwrapConversationArray, } from "@llumiverse/core";
+import { APIConnectionError, APIConnectionTimeoutError, APIError, AuthenticationError, BadRequestError, ConflictError, ContentFilterFinishReasonError, InternalServerError, LengthFinishReasonError, NotFoundError, OpenAIError, PermissionDeniedError, RateLimitError, UnprocessableEntityError, } from 'openai/error';
 import { formatOpenAILikeMultimodalPrompt } from "./openai_format.js";
 // Helper function to convert string to CompletionResult[]
 function textToCompletionResult(text) {
@@ -20,13 +21,14 @@ export class BaseOpenAIDriver extends AbstractDriver {
     extractDataFromResponse(_options, result) {
         const tokenInfo = mapUsage(result.usage);
         const tools = collectTools(result.output);
-
-
+        // Collect all parts in order (text and images)
+        const allResults = extractCompletionResults(result.output);
+        if (allResults.length === 0 && !tools) {
             this.logger.error({ result }, "[OpenAI] Response is not valid");
             throw new Error("Response is not valid: no data");
         }
         return {
-            result:
+            result: allResults,
             token_usage: tokenInfo,
             finish_reason: responseFinishReason(result, tools),
             tool_use: tools,
@@ -37,9 +39,15 @@ export class BaseOpenAIDriver extends AbstractDriver {
             this.logger.warn({ options: options.model_options }, "Invalid model options");
         }
         // Include conversation history (same as non-streaming)
-
+        // Fix orphaned function_call items (can occur when agent is stopped mid-tool-execution)
+        let conversation = fixOrphanedToolUse(updateConversation(options.conversation, prompt));
         const toolDefs = getToolDefinitions(options.tools);
         const useTools = toolDefs ? supportsToolUse(options.model, this.provider, true) : false;
+        // When no tools are provided but conversation contains function_call/function_call_output
+        // items (e.g. checkpoint summary calls), convert them to text to avoid API errors
+        if (!useTools) {
+            conversation = convertOpenAIFunctionItemsToText(conversation);
+        }
         convertRoles(prompt, options.model);
         const model_options = options.model_options;
         insert_image_detail(prompt, model_options?.image_detail ?? "auto");
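Note: `fixOrphanedToolUse` and `convertOpenAIFunctionItemsToText` are defined later in this diff. A sketch of the repair this hunk wires in (the conversation items below are illustrative, not taken from the package):

// The agent was stopped before the tool ran, so this function_call has no
// matching function_call_output.
const items = [
    { role: 'user', content: 'What is the weather?' },
    { type: 'function_call', call_id: 'call_9', name: 'get_weather', arguments: '{}' },
];
const repaired = fixOrphanedToolUse(items);
// repaired now ends with a synthetic output item:
// { type: 'function_call_output', call_id: 'call_9',
//   output: '[Tool interrupted: The user stopped the operation before "get_weather" could execute.]' }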
@@ -86,7 +94,13 @@ export class BaseOpenAIDriver extends AbstractDriver {
         insert_image_detail(prompt, model_options?.image_detail ?? "auto");
         const toolDefs = getToolDefinitions(options.tools);
         const useTools = toolDefs ? supportsToolUse(options.model, this.provider) : false;
-
+        // Fix orphaned function_call items (can occur when agent is stopped mid-tool-execution)
+        let conversation = fixOrphanedToolUse(updateConversation(options.conversation, prompt));
+        // When no tools are provided but conversation contains function_call/function_call_output
+        // items (e.g. checkpoint summary calls), convert them to text to avoid API errors
+        if (!useTools) {
+            conversation = convertOpenAIFunctionItemsToText(conversation);
+        }
         let parsedSchema = undefined;
         let strictMode = false;
         if (options.result_schema && supportsSchema(options.model)) {
@@ -136,10 +150,21 @@ export class BaseOpenAIDriver extends AbstractDriver {
         let processedConversation = stripBase64ImagesFromConversation(conversation, stripOptions);
         // Truncate large text content if configured
         processedConversation = truncateLargeTextInConversation(processedConversation, stripOptions);
+        // Strip old heartbeat status messages
+        processedConversation = stripHeartbeatsFromConversation(processedConversation, {
+            keepForTurns: options.stripHeartbeatsAfterTurns ?? 1,
+            currentTurn,
+        });
         completion.conversation = processedConversation;
         return completion;
     }
     canStream(_options) {
+        // Image generation models don't support streaming
+        if (_options.model.includes("dall-e")
+            || _options.model.includes("gpt-image")
+            || _options.model.includes("chatgpt-image")) {
+            return Promise.resolve(false);
+        }
         if (_options.model.includes("o1")
             && !(_options.model.includes("mini") || _options.model.includes("preview"))) {
             //o1 full does not support streaming
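A minimal usage sketch of the new heartbeat stripping, assuming `stripHeartbeatsFromConversation` drops heartbeat status items older than `keepForTurns` turns relative to `currentTurn` (the conversation source and turn value below are illustrative):

import { stripHeartbeatsFromConversation } from "@llumiverse/core";

// Hypothetical prior conversation in which heartbeat items accumulated.
const conversation = previousCompletion.conversation; // illustrative source
const currentTurn = 5; // assumed to come from the conversation metadata

// Same call shape as the driver uses above; 1 is the driver default when
// options.stripHeartbeatsAfterTurns is unset.
const trimmed = stripHeartbeatsFromConversation(conversation, {
    keepForTurns: 1,
    currentTurn,
});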
@@ -187,6 +212,10 @@ export class BaseOpenAIDriver extends AbstractDriver {
         };
         let processedConversation = stripBase64ImagesFromConversation(conversation, stripOptions);
         processedConversation = truncateLargeTextInConversation(processedConversation, stripOptions);
+        processedConversation = stripHeartbeatsFromConversation(processedConversation, {
+            keepForTurns: options.stripHeartbeatsAfterTurns ?? 1,
+            currentTurn,
+        });
         return processedConversation;
     }
     createTrainingPrompt(options) {
@@ -240,7 +269,7 @@ export class BaseOpenAIDriver extends AbstractDriver {
         //Some of these use the completions API instead of the chat completions API.
         //Others are for non-text input modalities. Therefore common to both.
         const wordBlacklist = ["embed", "whisper", "transcribe", "audio", "moderation", "tts",
-            "realtime", "
+            "realtime", "babbage", "davinci", "codex", "o1-pro", "computer-use", "sora"];
         //OpenAI has very little information, filtering based on name.
         result = result.filter((m) => {
             return !wordBlacklist.some((word) => m.id.includes(word));
@@ -252,14 +281,17 @@ export class BaseOpenAIDriver extends AbstractDriver {
             if (owner == "system") {
                 owner = "openai";
             }
+            // Determine model type based on capabilities
+            let modelType = ModelType.Text;
+            if (m.id.includes("dall-e") || m.id.includes("gpt-image")) {
+                modelType = ModelType.Image;
+            }
             return {
                 id: m.id,
                 name: m.id,
                 provider: this.provider,
                 owner: owner,
-                type:
-                can_stream: true,
-                is_multimodal: m.id.includes("gpt-4"),
+                type: modelType,
                 input_modalities: modelModalitiesToArray(modelCapability.input),
                 output_modalities: modelModalitiesToArray(modelCapability.output),
                 tool_support: modelCapability.tool_support,
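These image-model ids also drive the standalone `requestImageGeneration` path added further down in this diff. A usage sketch (the driver instance and option values are illustrative):

// Assumes an instantiated OpenAI driver; the options mirror the DALL-E branch
// of requestImageGeneration below.
const response = await driver.requestImageGeneration(
    [{ role: 'user', content: 'A watercolor fox, minimal background' }],
    {
        model: 'dall-e-3',
        model_options: { _option_id: 'openai-dalle', n: 1, image_quality: 'hd', style: 'vivid', size: '1024x1024' },
    });
// response.result => [{ type: 'image', value: 'data:image/png;base64,...' }]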
@@ -284,6 +316,289 @@ export class BaseOpenAIDriver extends AbstractDriver {
         }
         return { values: embeddings, model };
     }
+    imageModels = ["dall-e", "gpt-image", "chatgpt-image"];
+    /**
+     * Determine if a model is specifically an image generation model (not conversational image model)
+     */
+    isImageModel(model) {
+        // DALL-E models are standalone image generation
+        // gpt-image models can generate images in conversations, not standalone
+        return this.imageModels.some(imageModel => model.includes(imageModel));
+    }
+    /**
+     * Request image generation from standalone Images API
+     * Supports: DALL-E 2, DALL-E 3, GPT-image models (for edit/variation)
+     */
+    async requestImageGeneration(prompt, options) {
+        this.logger.debug(`[${this.provider}] Generating image with model ${options.model}`);
+        const model_options = options.model_options;
+        // Extract prompt text from ResponseInputItem[]
+        let promptText = "";
+        for (const item of prompt) {
+            if ('content' in item && typeof item.content === 'string') {
+                promptText += item.content + "\n";
+            }
+            else if ('content' in item && Array.isArray(item.content)) {
+                // Extract text from content array
+                for (const part of item.content) {
+                    if ('type' in part && part.type === 'input_text' && 'text' in part) {
+                        promptText += part.text + "\n";
+                    }
+                }
+            }
+        }
+        promptText = promptText.trim();
+        try {
+            const generateParams = {
+                model: options.model,
+                prompt: promptText,
+                size: model_options?.size || "1024x1024",
+            };
+            // Add DALL-E specific options
+            if (options.model.includes("dall-e") || model_options?._option_id === "openai-dalle") {
+                const dalleOptions = model_options;
+                generateParams.n = dalleOptions?.n || 1;
+                generateParams.response_format = dalleOptions?.response_format || "b64_json";
+                if (options.model.includes("dall-e-3")) {
+                    generateParams.quality = dalleOptions?.image_quality || "standard";
+                    if (dalleOptions?.style) {
+                        generateParams.style = dalleOptions.style;
+                    }
+                }
+            }
+            else {
+                // Default for other models
+                generateParams.n = 1;
+            }
+            const response = await this.service.images.generate(generateParams);
+            // Convert response to CompletionResults
+            const results = [];
+            if (response.data) {
+                for (const image of response.data) {
+                    let imageValue;
+                    if (image.b64_json) {
+                        // Base64 format
+                        imageValue = `data:image/png;base64,${image.b64_json}`;
+                    }
+                    else if (image.url) {
+                        // URL format
+                        imageValue = image.url;
+                    }
+                    else {
+                        continue;
+                    }
+                    results.push({
+                        type: "image",
+                        value: imageValue
+                    });
+                }
+            }
+            return {
+                result: results
+            };
+        }
+        catch (error) {
+            this.logger.error({ error }, `[${this.provider}] Image generation failed`);
+            return {
+                result: [],
+                error: {
+                    message: error.message,
+                    code: error.code || 'GENERATION_FAILED'
+                }
+            };
+        }
+    }
+    /**
+     * Format OpenAI API errors into LlumiverseError with proper status codes and retryability.
+     *
+     * OpenAI API errors have a specific structure:
+     * - APIError.status: HTTP status code (400, 401, 403, 404, 409, 422, 429, 500+)
+     * - APIError.error: Error object with type, message, param, code
+     * - APIError.requestID: Request ID for support
+     * - APIError.code: Error code (e.g., 'invalid_api_key', 'rate_limit_exceeded')
+     * - APIError.param: Parameter that caused the error (optional)
+     * - APIError.type: Error type (optional)
+     *
+     * Common error types:
+     * - BadRequestError (400): Invalid request parameters
+     * - AuthenticationError (401): Invalid API key
+     * - PermissionDeniedError (403): Insufficient permissions
+     * - NotFoundError (404): Resource not found
+     * - ConflictError (409): Resource conflict
+     * - UnprocessableEntityError (422): Validation error
+     * - RateLimitError (429): Rate limit exceeded
+     * - InternalServerError (500+): Server-side errors
+     * - APIConnectionError: Connection issues (no status code)
+     * - APIConnectionTimeoutError: Request timeout (no status code)
+     * - LengthFinishReasonError: Response truncated due to length
+     * - ContentFilterFinishReasonError: Content filtered
+     *
+     * This implementation works for:
+     * - OpenAI API
+     * - Azure OpenAI
+     * - xAI (uses OpenAI-compatible API)
+     * - Azure Foundry (OpenAI-compatible)
+     * - Other OpenAI-compatible APIs
+     *
+     * @see https://platform.openai.com/docs/guides/error-codes
+     */
+    formatLlumiverseError(error, context) {
+        // Check if it's an OpenAI API error
+        const isOpenAIError = this.isOpenAIApiError(error);
+        if (!isOpenAIError) {
+            // Not an OpenAI API error, use default handling
+            throw error;
+        }
+        const apiError = error;
+        const httpStatusCode = apiError.status;
+        // Extract error message
+        const message = apiError.message || String(error);
+        // Extract additional error details (only available on APIError)
+        const errorCode = apiError.code;
+        const errorParam = apiError.param;
+        const errorType = apiError.type;
+        // Build user-facing message with status code
+        let userMessage = message;
+        // Include status code in message (for end-user visibility)
+        if (httpStatusCode) {
+            userMessage = `[${httpStatusCode}] ${userMessage}`;
+        }
+        // Add error code if available and not already in message
+        if (errorCode && !userMessage.includes(errorCode)) {
+            userMessage += ` (code: ${errorCode})`;
+        }
+        // Add parameter info if available and helpful
+        if (errorParam && !userMessage.toLowerCase().includes(errorParam.toLowerCase())) {
+            userMessage += ` [param: ${errorParam}]`;
+        }
+        // Add request ID if available (useful for OpenAI support)
+        if (apiError.requestID) {
+            userMessage += ` (Request ID: ${apiError.requestID})`;
+        }
+        // Determine retryability based on OpenAI error types
+        const retryable = this.isOpenAIErrorRetryable(error, httpStatusCode, errorCode, errorType);
+        // Use the error constructor name as the error name
+        const errorName = error.constructor?.name || 'OpenAIError';
+        return new LlumiverseError(`[${context.provider}] ${userMessage}`, retryable, context, error, httpStatusCode, errorName);
+    }
+    /**
+     * Type guard to check if error is an OpenAI API error or OpenAI-specific error.
+     */
+    isOpenAIApiError(error) {
+        return (error !== null &&
+            typeof error === 'object' &&
+            (error instanceof APIError || error instanceof OpenAIError));
+    }
+    /**
+     * Determine if an OpenAI API error is retryable.
+     *
+     * Retryable errors:
+     * - RateLimitError (429): Rate limit exceeded, retry with backoff
+     * - InternalServerError (500+): Server-side errors
+     * - APIConnectionTimeoutError: Request timeout
+     * - Error codes: 'timeout', 'server_error', 'service_unavailable'
+     * - Status codes: 408, 429, 502, 503, 504, 529, 5xx
+     *
+     * Non-retryable errors:
+     * - BadRequestError (400): Invalid request parameters
+     * - AuthenticationError (401): Invalid API key
+     * - PermissionDeniedError (403): Insufficient permissions
+     * - NotFoundError (404): Resource not found
+     * - ConflictError (409): Resource conflict
+     * - UnprocessableEntityError (422): Validation error
+     * - LengthFinishReasonError: Length limit reached
+     * - ContentFilterFinishReasonError: Content filtered
+     * - Error codes: 'invalid_api_key', 'invalid_request_error', 'model_not_found'
+     * - Other 4xx client errors
+     *
+     * @param error - The error object
+     * @param httpStatusCode - The HTTP status code if available
+     * @param errorCode - The error code if available
+     * @param errorType - The error type if available
+     * @returns True if retryable, false if not retryable, undefined if unknown
+     */
+    isOpenAIErrorRetryable(error, httpStatusCode, errorCode, errorType) {
+        // Check specific OpenAI error types by class
+        if (error instanceof RateLimitError)
+            return true;
+        if (error instanceof InternalServerError)
+            return true;
+        if (error instanceof APIConnectionTimeoutError)
+            return true;
+        // Non-retryable by error type
+        if (error instanceof BadRequestError)
+            return false;
+        if (error instanceof AuthenticationError)
+            return false;
+        if (error instanceof PermissionDeniedError)
+            return false;
+        if (error instanceof NotFoundError)
+            return false;
+        if (error instanceof ConflictError)
+            return false;
+        if (error instanceof UnprocessableEntityError)
+            return false;
+        if (error instanceof LengthFinishReasonError)
+            return false;
+        if (error instanceof ContentFilterFinishReasonError)
+            return false;
+        // Check error codes (OpenAI specific)
+        if (errorCode) {
+            // Retryable error codes
+            if (errorCode === 'timeout')
+                return true;
+            if (errorCode === 'server_error')
+                return true;
+            if (errorCode === 'service_unavailable')
+                return true;
+            if (errorCode === 'rate_limit_exceeded')
+                return true;
+            // Non-retryable error codes
+            if (errorCode === 'invalid_api_key')
+                return false;
+            if (errorCode === 'invalid_request_error')
+                return false;
+            if (errorCode === 'model_not_found')
+                return false;
+            if (errorCode === 'insufficient_quota')
+                return false;
+            if (errorCode === 'invalid_model')
+                return false;
+            if (errorCode.includes('invalid_'))
+                return false;
+        }
+        // Check error type
+        if (errorType === 'invalid_request_error')
+            return false;
+        if (errorType === 'authentication_error')
+            return false;
+        // Use HTTP status code
+        if (httpStatusCode !== undefined) {
+            if (httpStatusCode === 429)
+                return true; // Rate limit
+            if (httpStatusCode === 408)
+                return true; // Request timeout
+            if (httpStatusCode === 502)
+                return true; // Bad gateway
+            if (httpStatusCode === 503)
+                return true; // Service unavailable
+            if (httpStatusCode === 504)
+                return true; // Gateway timeout
+            if (httpStatusCode === 529)
+                return true; // Overloaded
+            if (httpStatusCode >= 500 && httpStatusCode < 600)
+                return true; // Server errors
+            if (httpStatusCode >= 400 && httpStatusCode < 500)
+                return false; // Client errors
+        }
+        // Connection errors without status codes
+        if (error instanceof APIConnectionError && !(error instanceof APIConnectionTimeoutError)) {
+            // Generic connection errors might be retryable (network issues)
+            return true;
+        }
+        // Unknown error type - let consumer decide retry strategy
+        return undefined;
+    }
 }
 function jobInfo(job) {
     //validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
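The `retryable` flag computed by `isOpenAIErrorRetryable` is meant to drive caller-side retry policy. A usage sketch, assuming `LlumiverseError` exposes the `retryable` value passed to its constructor (the `driver.execute` call and backoff policy are illustrative):

import { LlumiverseError } from "@llumiverse/core";

// Retry only failures the driver classified as retryable (429, 408, 5xx,
// timeouts, connection errors), with exponential backoff between attempts.
async function executeWithRetry(driver, prompt, options, maxAttempts = 3) {
    for (let attempt = 1; ; attempt++) {
        try {
            return await driver.execute(prompt, options);
        }
        catch (err) {
            const retryable = err instanceof LlumiverseError ? err.retryable : false;
            if (!retryable || attempt >= maxAttempts) {
                throw err;
            }
            await new Promise(resolve => setTimeout(resolve, 1000 * 2 ** (attempt - 1)));
        }
    }
}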
@@ -484,6 +799,39 @@ function supportsSchema(model) {
     }
     return supportsToolUse(model, "openai");
 }
+/**
+ * Converts function_call and function_call_output items to text messages in OpenAI conversation.
+ * Preserves tool call information while removing structured items that require
+ * tools to be defined in the API request.
+ */
+export function convertOpenAIFunctionItemsToText(items) {
+    const hasFunctionItems = items.some(item => {
+        const type = item.type;
+        return type === 'function_call' || type === 'function_call_output';
+    });
+    if (!hasFunctionItems)
+        return items;
+    return items.map(item => {
+        const typed = item;
+        if (typed.type === 'function_call') {
+            const argsStr = typed.arguments || '';
+            const truncated = argsStr.length > 500 ? argsStr.substring(0, 500) + '...' : argsStr;
+            return {
+                role: 'assistant',
+                content: `[Tool call: ${typed.name}(${truncated})]`,
+            };
+        }
+        if (typed.type === 'function_call_output') {
+            const output = typed.output || 'No output';
+            const truncated = output.length > 500 ? output.substring(0, 500) + '...' : output;
+            return {
+                role: 'user',
+                content: `[Tool result: ${truncated}]`,
+            };
+        }
+        return item;
+    });
+}
 function getToolDefinitions(tools) {
     return tools ? tools.map(getToolDefinition) : undefined;
 }
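For example (values illustrative), when no tools are defined in the request the structured items become plain messages:

const items = [
    { type: 'function_call', call_id: 'call_1', name: 'get_weather', arguments: '{"city":"Paris"}' },
    { type: 'function_call_output', call_id: 'call_1', output: '{"temp_c":18}' },
];
convertOpenAIFunctionItemsToText(items);
// => [
//   { role: 'assistant', content: '[Tool call: get_weather({"city":"Paris"})]' },
//   { role: 'user', content: '[Tool result: {"temp_c":18}]' },
// ]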
@@ -544,6 +892,42 @@ export function collectTools(output) {
     }
     return tools.length > 0 ? tools : undefined;
 }
+/**
+ * Collect all parts (text and images) from response output in order.
+ * This preserves the original ordering of text and image parts.
+ */
+function extractCompletionResults(output) {
+    if (!output) {
+        return [];
+    }
+    const results = [];
+    for (const item of output) {
+        if (item.type === 'message') {
+            // Extract text from message content
+            for (const part of item.content) {
+                if (part.type === 'output_text' && part.text) {
+                    results.push({
+                        type: "text",
+                        value: part.text
+                    });
+                }
+            }
+        }
+        else if (item.type === 'image_generation_call' && 'result' in item && item.result) {
+            // GPT-image models return base64 encoded images in result field
+            const base64Data = item.result;
+            // Format as data URL for consistency with other image outputs
+            const imageUrl = base64Data.startsWith('data:')
+                ? base64Data
+                : `data:image/png;base64,${base64Data}`;
+            results.push({
+                type: "image",
+                value: imageUrl
+            });
+        }
+    }
+    return results;
+}
 //For strict mode false
 function limitedSchemaFormat(schema) {
     const formattedSchema = { ...schema };
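For example (a trimmed Responses API output, values illustrative), text and image parts keep their original order:

const output = [
    { type: 'message', content: [{ type: 'output_text', text: 'Here is your logo:' }] },
    { type: 'image_generation_call', result: 'iVBORw0KGg...' }, // base64 payload, truncated
];
extractCompletionResults(output);
// => [
//   { type: 'text', value: 'Here is your logo:' },
//   { type: 'image', value: 'data:image/png;base64,iVBORw0KGg...' },
// ]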
@@ -618,23 +1002,6 @@ function openAISchemaFormat(schema, nesting = 0) {
     }
     return formattedSchema;
 }
-function extractTextFromResponse(response) {
-    if (response.output_text) {
-        return response.output_text;
-    }
-    const collected = [];
-    for (const item of response.output ?? []) {
-        if (item.type === 'message') {
-            const text = item.content
-                .map(part => part.type === 'output_text' ? part.text : '')
-                .join('');
-            if (text) {
-                collected.push(text);
-            }
-        }
-    }
-    return collected.join("\n");
-}
 function responseFinishReason(response, tools) {
     if (tools && tools.length > 0) {
         return "tool_use";
@@ -650,6 +1017,68 @@ function responseFinishReason(response, tools) {
     }
     return 'stop';
 }
+/**
+ * Fix orphaned function_call items in the OpenAI Responses API conversation.
+ *
+ * When an agent is stopped mid-tool-execution, the conversation may contain
+ * function_call items without matching function_call_output items. The OpenAI
+ * Responses API requires every function_call to have a matching function_call_output.
+ *
+ * This function detects such cases and injects synthetic function_call_output items
+ * indicating the tools were interrupted, allowing the conversation to continue.
+ */
+export function fixOrphanedToolUse(items) {
+    if (items.length < 2)
+        return items;
+    // First pass: collect all function_call_output call_ids
+    const outputCallIds = new Set();
+    for (const item of items) {
+        if ('type' in item && item.type === 'function_call_output') {
+            outputCallIds.add(item.call_id);
+        }
+    }
+    // Second pass: build result, injecting synthetic outputs for orphaned function_calls
+    const result = [];
+    const pendingCalls = new Map(); // call_id -> tool name
+    for (const item of items) {
+        if ('type' in item && item.type === 'function_call') {
+            const fc = item;
+            // Only track if there's no matching output anywhere in the conversation
+            if (!outputCallIds.has(fc.call_id)) {
+                pendingCalls.set(fc.call_id, fc.name ?? 'unknown');
+            }
+            result.push(item);
+        }
+        else if ('type' in item && item.type === 'function_call_output') {
+            result.push(item);
+        }
+        else {
+            // Before any non-function item, flush pending orphaned calls
+            if (pendingCalls.size > 0) {
+                for (const [callId, toolName] of pendingCalls) {
+                    result.push({
+                        type: 'function_call_output',
+                        call_id: callId,
+                        output: `[Tool interrupted: The user stopped the operation before "${toolName}" could execute.]`,
+                    });
+                }
+                pendingCalls.clear();
+            }
+            result.push(item);
+        }
+    }
+    // Handle trailing orphans at the end of the conversation
+    if (pendingCalls.size > 0) {
+        for (const [callId, toolName] of pendingCalls) {
+            result.push({
+                type: 'function_call_output',
+                call_id: callId,
+                output: `[Tool interrupted: The user stopped the operation before "${toolName}" could execute.]`,
+            });
+        }
+    }
+    return result;
+}
 function safeJsonParse(value) {
     if (typeof value !== 'string') {
         return value;