@llumiverse/drivers 1.0.0-dev.20260202.145450Z → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/cjs/adobe/firefly.js +120 -0
- package/lib/cjs/adobe/firefly.js.map +1 -0
- package/lib/cjs/azure/azure_foundry.js +432 -0
- package/lib/cjs/azure/azure_foundry.js.map +1 -0
- package/lib/cjs/bedrock/converse.js +359 -0
- package/lib/cjs/bedrock/converse.js.map +1 -0
- package/lib/cjs/bedrock/index.js +1441 -0
- package/lib/cjs/bedrock/index.js.map +1 -0
- package/lib/cjs/bedrock/nova-image-payload.js +207 -0
- package/lib/cjs/bedrock/nova-image-payload.js.map +1 -0
- package/lib/cjs/bedrock/payloads.js +3 -0
- package/lib/cjs/bedrock/payloads.js.map +1 -0
- package/lib/cjs/bedrock/s3.js +107 -0
- package/lib/cjs/bedrock/s3.js.map +1 -0
- package/lib/cjs/bedrock/twelvelabs.js +87 -0
- package/lib/cjs/bedrock/twelvelabs.js.map +1 -0
- package/lib/cjs/groq/index.js +326 -0
- package/lib/cjs/groq/index.js.map +1 -0
- package/lib/cjs/huggingface_ie.js +201 -0
- package/lib/cjs/huggingface_ie.js.map +1 -0
- package/lib/cjs/index.js +31 -0
- package/lib/cjs/index.js.map +1 -0
- package/lib/cjs/mistral/index.js +176 -0
- package/lib/cjs/mistral/index.js.map +1 -0
- package/lib/cjs/mistral/types.js +83 -0
- package/lib/cjs/mistral/types.js.map +1 -0
- package/lib/cjs/openai/azure_openai.js +72 -0
- package/lib/cjs/openai/azure_openai.js.map +1 -0
- package/lib/cjs/openai/index.js +1100 -0
- package/lib/cjs/openai/index.js.map +1 -0
- package/lib/cjs/openai/openai.js +21 -0
- package/lib/cjs/openai/openai.js.map +1 -0
- package/lib/cjs/openai/openai_compatible.js +63 -0
- package/lib/cjs/openai/openai_compatible.js.map +1 -0
- package/lib/cjs/openai/openai_format.js +131 -0
- package/lib/cjs/openai/openai_format.js.map +1 -0
- package/lib/cjs/package.json +3 -0
- package/lib/cjs/replicate.js +275 -0
- package/lib/cjs/replicate.js.map +1 -0
- package/lib/cjs/test-driver/TestErrorCompletionStream.js +20 -0
- package/lib/cjs/test-driver/TestErrorCompletionStream.js.map +1 -0
- package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js +24 -0
- package/lib/cjs/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
- package/lib/cjs/test-driver/index.js +109 -0
- package/lib/cjs/test-driver/index.js.map +1 -0
- package/lib/cjs/test-driver/utils.js +30 -0
- package/lib/cjs/test-driver/utils.js.map +1 -0
- package/lib/cjs/togetherai/index.js +126 -0
- package/lib/cjs/togetherai/index.js.map +1 -0
- package/lib/cjs/togetherai/interfaces.js +3 -0
- package/lib/cjs/togetherai/interfaces.js.map +1 -0
- package/lib/cjs/vertexai/debug.js +12 -0
- package/lib/cjs/vertexai/debug.js.map +1 -0
- package/lib/cjs/vertexai/embeddings/embeddings-image.js +27 -0
- package/lib/cjs/vertexai/embeddings/embeddings-image.js.map +1 -0
- package/lib/cjs/vertexai/embeddings/embeddings-text.js +23 -0
- package/lib/cjs/vertexai/embeddings/embeddings-text.js.map +1 -0
- package/lib/cjs/vertexai/index.js +635 -0
- package/lib/cjs/vertexai/index.js.map +1 -0
- package/lib/cjs/vertexai/models/claude.js +842 -0
- package/lib/cjs/vertexai/models/claude.js.map +1 -0
- package/lib/cjs/vertexai/models/gemini.js +1110 -0
- package/lib/cjs/vertexai/models/gemini.js.map +1 -0
- package/lib/cjs/vertexai/models/imagen.js +303 -0
- package/lib/cjs/vertexai/models/imagen.js.map +1 -0
- package/lib/cjs/vertexai/models/llama.js +183 -0
- package/lib/cjs/vertexai/models/llama.js.map +1 -0
- package/lib/cjs/vertexai/models.js +35 -0
- package/lib/cjs/vertexai/models.js.map +1 -0
- package/lib/cjs/watsonx/index.js +161 -0
- package/lib/cjs/watsonx/index.js.map +1 -0
- package/lib/cjs/watsonx/interfaces.js +3 -0
- package/lib/cjs/watsonx/interfaces.js.map +1 -0
- package/lib/cjs/xai/index.js +65 -0
- package/lib/cjs/xai/index.js.map +1 -0
- package/lib/esm/adobe/firefly.js +116 -0
- package/lib/esm/adobe/firefly.js.map +1 -0
- package/lib/esm/azure/azure_foundry.js +426 -0
- package/lib/esm/azure/azure_foundry.js.map +1 -0
- package/lib/esm/bedrock/converse.js +352 -0
- package/lib/esm/bedrock/converse.js.map +1 -0
- package/lib/esm/bedrock/index.js +1434 -0
- package/lib/esm/bedrock/index.js.map +1 -0
- package/lib/esm/bedrock/nova-image-payload.js +203 -0
- package/lib/esm/bedrock/nova-image-payload.js.map +1 -0
- package/lib/esm/bedrock/payloads.js +2 -0
- package/lib/esm/bedrock/payloads.js.map +1 -0
- package/lib/esm/bedrock/s3.js +99 -0
- package/lib/esm/bedrock/s3.js.map +1 -0
- package/lib/esm/bedrock/twelvelabs.js +84 -0
- package/lib/esm/bedrock/twelvelabs.js.map +1 -0
- package/lib/esm/groq/index.js +319 -0
- package/lib/esm/groq/index.js.map +1 -0
- package/lib/esm/huggingface_ie.js +197 -0
- package/lib/esm/huggingface_ie.js.map +1 -0
- package/lib/esm/index.js +15 -0
- package/lib/esm/index.js.map +1 -0
- package/lib/esm/mistral/index.js +172 -0
- package/lib/esm/mistral/index.js.map +1 -0
- package/lib/esm/mistral/types.js +80 -0
- package/lib/esm/mistral/types.js.map +1 -0
- package/lib/esm/openai/azure_openai.js +68 -0
- package/lib/esm/openai/azure_openai.js.map +1 -0
- package/lib/esm/openai/index.js +1093 -0
- package/lib/esm/openai/index.js.map +1 -0
- package/lib/esm/openai/openai.js +14 -0
- package/lib/esm/openai/openai.js.map +1 -0
- package/lib/esm/openai/openai_compatible.js +56 -0
- package/lib/esm/openai/openai_compatible.js.map +1 -0
- package/lib/esm/openai/openai_format.js +127 -0
- package/lib/esm/openai/openai_format.js.map +1 -0
- package/lib/esm/replicate.js +268 -0
- package/lib/esm/replicate.js.map +1 -0
- package/lib/esm/test-driver/TestErrorCompletionStream.js +16 -0
- package/lib/esm/test-driver/TestErrorCompletionStream.js.map +1 -0
- package/lib/esm/test-driver/TestValidationErrorCompletionStream.js +20 -0
- package/lib/esm/test-driver/TestValidationErrorCompletionStream.js.map +1 -0
- package/lib/esm/test-driver/index.js +91 -0
- package/lib/esm/test-driver/index.js.map +1 -0
- package/lib/esm/test-driver/utils.js +25 -0
- package/lib/esm/test-driver/utils.js.map +1 -0
- package/lib/esm/togetherai/index.js +122 -0
- package/lib/esm/togetherai/index.js.map +1 -0
- package/lib/esm/togetherai/interfaces.js +2 -0
- package/lib/esm/togetherai/interfaces.js.map +1 -0
- package/lib/esm/vertexai/debug.js +6 -0
- package/lib/esm/vertexai/debug.js.map +1 -0
- package/lib/esm/vertexai/embeddings/embeddings-image.js +24 -0
- package/lib/esm/vertexai/embeddings/embeddings-image.js.map +1 -0
- package/lib/esm/vertexai/embeddings/embeddings-text.js +20 -0
- package/lib/esm/vertexai/embeddings/embeddings-text.js.map +1 -0
- package/lib/esm/vertexai/index.js +630 -0
- package/lib/esm/vertexai/index.js.map +1 -0
- package/lib/esm/vertexai/models/claude.js +833 -0
- package/lib/esm/vertexai/models/claude.js.map +1 -0
- package/lib/esm/vertexai/models/gemini.js +1104 -0
- package/lib/esm/vertexai/models/gemini.js.map +1 -0
- package/lib/esm/vertexai/models/imagen.js +299 -0
- package/lib/esm/vertexai/models/imagen.js.map +1 -0
- package/lib/esm/vertexai/models/llama.js +179 -0
- package/lib/esm/vertexai/models/llama.js.map +1 -0
- package/lib/esm/vertexai/models.js +32 -0
- package/lib/esm/vertexai/models.js.map +1 -0
- package/lib/esm/watsonx/index.js +157 -0
- package/lib/esm/watsonx/index.js.map +1 -0
- package/lib/esm/watsonx/interfaces.js +2 -0
- package/lib/esm/watsonx/interfaces.js.map +1 -0
- package/lib/esm/xai/index.js +58 -0
- package/lib/esm/xai/index.js.map +1 -0
- package/lib/types/adobe/firefly.d.ts +30 -0
- package/lib/types/adobe/firefly.d.ts.map +1 -0
- package/lib/types/azure/azure_foundry.d.ts +52 -0
- package/lib/types/azure/azure_foundry.d.ts.map +1 -0
- package/lib/types/bedrock/converse.d.ts +8 -0
- package/lib/types/bedrock/converse.d.ts.map +1 -0
- package/lib/types/bedrock/index.d.ts +135 -0
- package/lib/types/bedrock/index.d.ts.map +1 -0
- package/lib/types/bedrock/nova-image-payload.d.ts +74 -0
- package/lib/types/bedrock/nova-image-payload.d.ts.map +1 -0
- package/lib/types/bedrock/payloads.d.ts +12 -0
- package/lib/types/bedrock/payloads.d.ts.map +1 -0
- package/lib/types/bedrock/s3.d.ts +23 -0
- package/lib/types/bedrock/s3.d.ts.map +1 -0
- package/lib/types/bedrock/twelvelabs.d.ts +50 -0
- package/lib/types/bedrock/twelvelabs.d.ts.map +1 -0
- package/lib/types/groq/index.d.ts +27 -0
- package/lib/types/groq/index.d.ts.map +1 -0
- package/lib/types/huggingface_ie.d.ts +35 -0
- package/lib/types/huggingface_ie.d.ts.map +1 -0
- package/lib/types/index.d.ts +15 -0
- package/lib/types/index.d.ts.map +1 -0
- package/lib/types/mistral/index.d.ts +25 -0
- package/lib/types/mistral/index.d.ts.map +1 -0
- package/lib/types/mistral/types.d.ts +127 -0
- package/lib/types/mistral/types.d.ts.map +1 -0
- package/lib/types/openai/azure_openai.d.ts +25 -0
- package/lib/types/openai/azure_openai.d.ts.map +1 -0
- package/lib/types/openai/index.d.ts +126 -0
- package/lib/types/openai/index.d.ts.map +1 -0
- package/lib/types/openai/openai.d.ts +15 -0
- package/lib/types/openai/openai.d.ts.map +1 -0
- package/lib/types/openai/openai_compatible.d.ts +31 -0
- package/lib/types/openai/openai_compatible.d.ts.map +1 -0
- package/lib/types/openai/openai_format.d.ts +21 -0
- package/lib/types/openai/openai_format.d.ts.map +1 -0
- package/lib/types/replicate.d.ts +48 -0
- package/lib/types/replicate.d.ts.map +1 -0
- package/lib/types/test-driver/TestErrorCompletionStream.d.ts +9 -0
- package/lib/types/test-driver/TestErrorCompletionStream.d.ts.map +1 -0
- package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts +9 -0
- package/lib/types/test-driver/TestValidationErrorCompletionStream.d.ts.map +1 -0
- package/lib/types/test-driver/index.d.ts +24 -0
- package/lib/types/test-driver/index.d.ts.map +1 -0
- package/lib/types/test-driver/utils.d.ts +5 -0
- package/lib/types/test-driver/utils.d.ts.map +1 -0
- package/lib/types/togetherai/index.d.ts +23 -0
- package/lib/types/togetherai/index.d.ts.map +1 -0
- package/lib/types/togetherai/interfaces.d.ts +96 -0
- package/lib/types/togetherai/interfaces.d.ts.map +1 -0
- package/lib/types/vertexai/debug.d.ts +2 -0
- package/lib/types/vertexai/debug.d.ts.map +1 -0
- package/lib/types/vertexai/embeddings/embeddings-image.d.ts +11 -0
- package/lib/types/vertexai/embeddings/embeddings-image.d.ts.map +1 -0
- package/lib/types/vertexai/embeddings/embeddings-text.d.ts +10 -0
- package/lib/types/vertexai/embeddings/embeddings-text.d.ts.map +1 -0
- package/lib/types/vertexai/index.d.ts +79 -0
- package/lib/types/vertexai/index.d.ts.map +1 -0
- package/lib/types/vertexai/models/claude.d.ts +103 -0
- package/lib/types/vertexai/models/claude.d.ts.map +1 -0
- package/lib/types/vertexai/models/gemini.d.ts +78 -0
- package/lib/types/vertexai/models/gemini.d.ts.map +1 -0
- package/lib/types/vertexai/models/imagen.d.ts +75 -0
- package/lib/types/vertexai/models/imagen.d.ts.map +1 -0
- package/lib/types/vertexai/models/llama.d.ts +20 -0
- package/lib/types/vertexai/models/llama.d.ts.map +1 -0
- package/lib/types/vertexai/models.d.ts +20 -0
- package/lib/types/vertexai/models.d.ts.map +1 -0
- package/lib/types/watsonx/index.d.ts +27 -0
- package/lib/types/watsonx/index.d.ts.map +1 -0
- package/lib/types/watsonx/interfaces.d.ts +65 -0
- package/lib/types/watsonx/interfaces.d.ts.map +1 -0
- package/lib/types/xai/index.d.ts +18 -0
- package/lib/types/xai/index.d.ts.map +1 -0
- package/package.json +18 -18
- package/src/bedrock/converse.ts +85 -10
- package/src/bedrock/error-handling.test.ts +352 -0
- package/src/bedrock/index.ts +293 -16
- package/src/groq/index.ts +9 -4
- package/src/mistral/index.ts +25 -22
- package/src/mistral/types.ts +0 -5
- package/src/openai/error-handling.test.ts +567 -0
- package/src/openai/index.ts +513 -33
- package/src/openai/openai_compatible.ts +7 -0
- package/src/openai/openai_format.ts +1 -1
- package/src/vertexai/index.ts +61 -13
- package/src/vertexai/models/claude-error-handling.test.ts +432 -0
- package/src/vertexai/models/claude.ts +287 -10
- package/src/vertexai/models/gemini-error-handling.test.ts +353 -0
- package/src/vertexai/models/gemini.ts +329 -52
- package/src/vertexai/models.ts +7 -2
@@ -0,0 +1,1100 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.BaseOpenAIDriver = void 0;
exports.convertOpenAIFunctionItemsToText = convertOpenAIFunctionItemsToText;
exports.collectTools = collectTools;
exports.fixOrphanedToolUse = fixOrphanedToolUse;
const core_1 = require("@llumiverse/core");
const error_1 = require("openai/error");
const openai_format_js_1 = require("./openai_format.js");
// Helper function to convert string to CompletionResult[]
function textToCompletionResult(text) {
    return text ? [{ type: "text", value: text }] : [];
}
//TODO: Do we need a list?, replace with if statements and modernize?
const supportFineTunning = new Set([
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-0613",
    "babbage-002",
    "davinci-002",
    "gpt-4-0613"
]);
class BaseOpenAIDriver extends core_1.AbstractDriver {
    constructor(opts) {
        super(opts);
        this.formatPrompt = openai_format_js_1.formatOpenAILikeMultimodalPrompt;
    }
    extractDataFromResponse(_options, result) {
        const tokenInfo = mapUsage(result.usage);
        const tools = collectTools(result.output);
        // Collect all parts in order (text and images)
        const allResults = extractCompletionResults(result.output);
        if (allResults.length === 0 && !tools) {
            this.logger.error({ result }, "[OpenAI] Response is not valid");
            throw new Error("Response is not valid: no data");
        }
        return {
            result: allResults,
            token_usage: tokenInfo,
            finish_reason: responseFinishReason(result, tools),
            tool_use: tools,
        };
    }
    async requestTextCompletionStream(prompt, options) {
        if (options.model_options?._option_id !== "openai-text" && options.model_options?._option_id !== "openai-thinking") {
            this.logger.warn({ options: options.model_options }, "Invalid model options");
        }
        // Include conversation history (same as non-streaming)
        // Fix orphaned function_call items (can occur when agent is stopped mid-tool-execution)
        let conversation = fixOrphanedToolUse(updateConversation(options.conversation, prompt));
        const toolDefs = getToolDefinitions(options.tools);
        const useTools = toolDefs ? (0, core_1.supportsToolUse)(options.model, this.provider, true) : false;
        // When no tools are provided but conversation contains function_call/function_call_output
        // items (e.g. checkpoint summary calls), convert them to text to avoid API errors
        if (!useTools) {
            conversation = convertOpenAIFunctionItemsToText(conversation);
        }
        convertRoles(prompt, options.model);
        const model_options = options.model_options;
        insert_image_detail(prompt, model_options?.image_detail ?? "auto");
        let parsedSchema = undefined;
        let strictMode = false;
        if (options.result_schema && supportsSchema(options.model)) {
            try {
                parsedSchema = openAISchemaFormat(options.result_schema);
                strictMode = true;
            }
            catch (e) {
                parsedSchema = limitedSchemaFormat(options.result_schema);
                strictMode = false;
            }
        }
        const reasoning = model_options?.reasoning_effort ? { effort: model_options.reasoning_effort } : undefined;
        const isReasoningModel = /\b(o1|o3|o4)\b/.test(options.model);
        const stream = await this.service.responses.create({
            stream: true,
            model: options.model,
            input: conversation,
            reasoning,
            temperature: isReasoningModel ? undefined : model_options?.temperature,
            top_p: isReasoningModel ? undefined : model_options?.top_p,
            max_output_tokens: model_options?.max_tokens,
            tools: useTools ? toolDefs : undefined,
            text: parsedSchema ? {
                format: {
                    type: "json_schema",
                    name: "format_output",
                    schema: parsedSchema,
                    strict: strictMode,
                }
            } : undefined,
        });
        return mapResponseStream(stream);
    }
    async requestTextCompletion(prompt, options) {
        if (options.model_options?._option_id !== "openai-text" && options.model_options?._option_id !== "openai-thinking") {
            this.logger.warn({ options: options.model_options }, "Invalid model options");
        }
        convertRoles(prompt, options.model);
        const model_options = options.model_options;
        insert_image_detail(prompt, model_options?.image_detail ?? "auto");
        const toolDefs = getToolDefinitions(options.tools);
        const useTools = toolDefs ? (0, core_1.supportsToolUse)(options.model, this.provider) : false;
        // Fix orphaned function_call items (can occur when agent is stopped mid-tool-execution)
        let conversation = fixOrphanedToolUse(updateConversation(options.conversation, prompt));
        // When no tools are provided but conversation contains function_call/function_call_output
        // items (e.g. checkpoint summary calls), convert them to text to avoid API errors
        if (!useTools) {
            conversation = convertOpenAIFunctionItemsToText(conversation);
        }
        let parsedSchema = undefined;
        let strictMode = false;
        if (options.result_schema && supportsSchema(options.model)) {
            try {
                parsedSchema = openAISchemaFormat(options.result_schema);
                strictMode = true;
            }
            catch (e) {
                parsedSchema = limitedSchemaFormat(options.result_schema);
                strictMode = false;
            }
        }
        const reasoning = model_options?.reasoning_effort ? { effort: model_options.reasoning_effort } : undefined;
        const isReasoningModel = /\b(o1|o3|o4)\b/.test(options.model);
        const res = await this.service.responses.create({
            stream: false,
            model: options.model,
            input: conversation,
            reasoning,
            temperature: isReasoningModel ? undefined : model_options?.temperature,
            top_p: isReasoningModel ? undefined : model_options?.top_p,
            max_output_tokens: model_options?.max_tokens, //TODO: use max_tokens for older models, currently relying on OpenAI to handle it
            tools: useTools ? toolDefs : undefined,
            text: parsedSchema ? {
                format: {
                    type: "json_schema",
                    name: "format_output",
                    schema: parsedSchema,
                    strict: strictMode,
                }
            } : undefined,
        });
        const completion = this.extractDataFromResponse(options, res);
        if (options.include_original_response) {
            completion.original_response = res;
        }
        conversation = updateConversation(conversation, createAssistantMessageFromCompletion(completion));
        // Increment turn counter for deferred stripping
        conversation = (0, core_1.incrementConversationTurn)(conversation);
        // Strip large base64 image data based on options.stripImagesAfterTurns
        const currentTurn = (0, core_1.getConversationMeta)(conversation).turnNumber;
        const stripOptions = {
            keepForTurns: options.stripImagesAfterTurns ?? Infinity,
            currentTurn,
            textMaxTokens: options.stripTextMaxTokens
        };
        let processedConversation = (0, core_1.stripBase64ImagesFromConversation)(conversation, stripOptions);
        // Truncate large text content if configured
        processedConversation = (0, core_1.truncateLargeTextInConversation)(processedConversation, stripOptions);
        // Strip old heartbeat status messages
        processedConversation = (0, core_1.stripHeartbeatsFromConversation)(processedConversation, {
            keepForTurns: options.stripHeartbeatsAfterTurns ?? 1,
            currentTurn,
        });
        completion.conversation = processedConversation;
        return completion;
    }
    canStream(_options) {
        // Image generation models don't support streaming
        if (_options.model.includes("dall-e")
            || _options.model.includes("gpt-image")
            || _options.model.includes("chatgpt-image")) {
            return Promise.resolve(false);
        }
        if (_options.model.includes("o1")
            && !(_options.model.includes("mini") || _options.model.includes("preview"))) {
            //o1 full does not support streaming
            //TODO: Update when OpenAI adds support for streaming, last check 16/02/2025
            return Promise.resolve(false);
        }
        return Promise.resolve(true);
    }
    /**
     * Build conversation context after streaming completion.
     * Reconstructs the assistant message from accumulated results and applies stripping.
     */
    buildStreamingConversation(prompt, result, toolUse, options) {
        // Build assistant message from accumulated CompletionResult[]
        const completionResults = result;
        const textContent = completionResultsToText(completionResults);
        // Start with the conversation from options or the prompt
        let conversation = updateConversation(options.conversation, prompt);
        // Add assistant message as EasyInputMessage
        if (textContent) {
            const assistantMessage = {
                role: 'assistant',
                content: textContent,
            };
            conversation = updateConversation(conversation, [assistantMessage]);
        }
        // Add function calls as separate items (Response API format)
        if (toolUse && toolUse.length > 0) {
            const functionCalls = toolUse.map(t => ({
                type: 'function_call',
                call_id: t.id,
                name: t.tool_name,
                arguments: typeof t.tool_input === 'string' ? t.tool_input : JSON.stringify(t.tool_input ?? {}),
            }));
            conversation = updateConversation(conversation, functionCalls);
        }
        // Increment turn counter
        conversation = (0, core_1.incrementConversationTurn)(conversation);
        // Apply stripping based on options
        const currentTurn = (0, core_1.getConversationMeta)(conversation).turnNumber;
        const stripOptions = {
            keepForTurns: options.stripImagesAfterTurns ?? Infinity,
            currentTurn,
            textMaxTokens: options.stripTextMaxTokens
        };
        let processedConversation = (0, core_1.stripBase64ImagesFromConversation)(conversation, stripOptions);
        processedConversation = (0, core_1.truncateLargeTextInConversation)(processedConversation, stripOptions);
        processedConversation = (0, core_1.stripHeartbeatsFromConversation)(processedConversation, {
            keepForTurns: options.stripHeartbeatsAfterTurns ?? 1,
            currentTurn,
        });
        return processedConversation;
    }
    createTrainingPrompt(options) {
        if (options.model.includes("gpt")) {
            return super.createTrainingPrompt(options);
        }
        else {
            // babbage, davinci not yet implemented
            throw new Error("Unsupported model for training: " + options.model);
        }
    }
    async startTraining(dataset, options) {
        const url = await dataset.getURL();
        const file = await this.service.files.create({
            file: await fetch(url),
            purpose: "fine-tune",
        });
        const job = await this.service.fineTuning.jobs.create({
            training_file: file.id,
            model: options.model,
            hyperparameters: options.params
        });
        return jobInfo(job);
    }
    async cancelTraining(jobId) {
        const job = await this.service.fineTuning.jobs.cancel(jobId);
        return jobInfo(job);
    }
    async getTrainingJob(jobId) {
        const job = await this.service.fineTuning.jobs.retrieve(jobId);
        return jobInfo(job);
    }
    // ========= management API =============
    async validateConnection() {
        try {
            await this.service.models.list();
            return true;
        }
        catch (error) {
            return false;
        }
    }
    listTrainableModels() {
        return this._listModels((m) => supportFineTunning.has(m.id));
    }
    async listModels() {
        return this._listModels();
    }
    async _listModels(filter) {
        let result = (await this.service.models.list()).data;
        //Some of these use the completions API instead of the chat completions API.
        //Others are for non-text input modalities. Therefore common to both.
        const wordBlacklist = ["embed", "whisper", "transcribe", "audio", "moderation", "tts",
            "realtime", "babbage", "davinci", "codex", "o1-pro", "computer-use", "sora"];
        //OpenAI has very little information, filtering based on name.
        result = result.filter((m) => {
            return !wordBlacklist.some((word) => m.id.includes(word));
        });
        const models = filter ? result.filter(filter) : result;
        const aiModels = models.map((m) => {
            const modelCapability = (0, core_1.getModelCapabilities)(m.id, "openai");
            let owner = m.owned_by;
            if (owner == "system") {
                owner = "openai";
            }
            // Determine model type based on capabilities
            let modelType = core_1.ModelType.Text;
            if (m.id.includes("dall-e") || m.id.includes("gpt-image")) {
                modelType = core_1.ModelType.Image;
            }
            return {
                id: m.id,
                name: m.id,
                provider: this.provider,
                owner: owner,
                type: modelType,
                input_modalities: (0, core_1.modelModalitiesToArray)(modelCapability.input),
                output_modalities: (0, core_1.modelModalitiesToArray)(modelCapability.output),
                tool_support: modelCapability.tool_support,
            };
        }).sort((a, b) => a.id.localeCompare(b.id));
        return aiModels;
    }
    async generateEmbeddings({ text, image, model = "text-embedding-3-small" }) {
        if (image) {
            throw new Error("Image embeddings not supported by OpenAI");
        }
        if (!text) {
            throw new Error("No text provided");
        }
        const res = await this.service.embeddings.create({
            input: text,
            model: model,
        });
        const embeddings = res.data[0].embedding;
        if (!embeddings || embeddings.length === 0) {
            throw new Error("No embedding found");
        }
        return { values: embeddings, model };
    }
    imageModels = ["dall-e", "gpt-image", "chatgpt-image"];
    /**
     * Determine if a model is specifically an image generation model (not conversational image model)
     */
    isImageModel(model) {
        // DALL-E models are standalone image generation
        // gpt-image models can generate images in conversations, not standalone
        return this.imageModels.some(imageModel => model.includes(imageModel));
    }
    /**
     * Request image generation from standalone Images API
     * Supports: DALL-E 2, DALL-E 3, GPT-image models (for edit/variation)
     */
    async requestImageGeneration(prompt, options) {
        this.logger.debug(`[${this.provider}] Generating image with model ${options.model}`);
        const model_options = options.model_options;
        // Extract prompt text from ResponseInputItem[]
        let promptText = "";
        for (const item of prompt) {
            if ('content' in item && typeof item.content === 'string') {
                promptText += item.content + "\n";
            }
            else if ('content' in item && Array.isArray(item.content)) {
                // Extract text from content array
                for (const part of item.content) {
                    if ('type' in part && part.type === 'input_text' && 'text' in part) {
                        promptText += part.text + "\n";
                    }
                }
            }
        }
        promptText = promptText.trim();
        try {
            const generateParams = {
                model: options.model,
                prompt: promptText,
                size: model_options?.size || "1024x1024",
            };
            // Add DALL-E specific options
            if (options.model.includes("dall-e") || model_options?._option_id === "openai-dalle") {
                const dalleOptions = model_options;
                generateParams.n = dalleOptions?.n || 1;
                generateParams.response_format = dalleOptions?.response_format || "b64_json";
                if (options.model.includes("dall-e-3")) {
                    generateParams.quality = dalleOptions?.image_quality || "standard";
                    if (dalleOptions?.style) {
                        generateParams.style = dalleOptions.style;
                    }
                }
            }
            else {
                // Default for other models
                generateParams.n = 1;
            }
            const response = await this.service.images.generate(generateParams);
            // Convert response to CompletionResults
            const results = [];
            if (response.data) {
                for (const image of response.data) {
                    let imageValue;
                    if (image.b64_json) {
                        // Base64 format
                        imageValue = `data:image/png;base64,${image.b64_json}`;
                    }
                    else if (image.url) {
                        // URL format
                        imageValue = image.url;
                    }
                    else {
                        continue;
                    }
                    results.push({
                        type: "image",
                        value: imageValue
                    });
                }
            }
            return {
                result: results
            };
        }
        catch (error) {
            this.logger.error({ error }, `[${this.provider}] Image generation failed`);
            return {
                result: [],
                error: {
                    message: error.message,
                    code: error.code || 'GENERATION_FAILED'
                }
            };
        }
    }
    /**
     * Format OpenAI API errors into LlumiverseError with proper status codes and retryability.
     *
     * OpenAI API errors have a specific structure:
     * - APIError.status: HTTP status code (400, 401, 403, 404, 409, 422, 429, 500+)
     * - APIError.error: Error object with type, message, param, code
     * - APIError.requestID: Request ID for support
     * - APIError.code: Error code (e.g., 'invalid_api_key', 'rate_limit_exceeded')
     * - APIError.param: Parameter that caused the error (optional)
     * - APIError.type: Error type (optional)
     *
     * Common error types:
     * - BadRequestError (400): Invalid request parameters
     * - AuthenticationError (401): Invalid API key
     * - PermissionDeniedError (403): Insufficient permissions
     * - NotFoundError (404): Resource not found
     * - ConflictError (409): Resource conflict
     * - UnprocessableEntityError (422): Validation error
     * - RateLimitError (429): Rate limit exceeded
     * - InternalServerError (500+): Server-side errors
     * - APIConnectionError: Connection issues (no status code)
     * - APIConnectionTimeoutError: Request timeout (no status code)
     * - LengthFinishReasonError: Response truncated due to length
     * - ContentFilterFinishReasonError: Content filtered
     *
     * This implementation works for:
     * - OpenAI API
     * - Azure OpenAI
     * - xAI (uses OpenAI-compatible API)
     * - Azure Foundry (OpenAI-compatible)
     * - Other OpenAI-compatible APIs
     *
     * @see https://platform.openai.com/docs/guides/error-codes
     */
    formatLlumiverseError(error, context) {
        // Check if it's an OpenAI API error
        const isOpenAIError = this.isOpenAIApiError(error);
        if (!isOpenAIError) {
            // Not an OpenAI API error, use default handling
            throw error;
        }
        const apiError = error;
        const httpStatusCode = apiError.status;
        // Extract error message
        const message = apiError.message || String(error);
        // Extract additional error details (only available on APIError)
        const errorCode = apiError.code;
        const errorParam = apiError.param;
        const errorType = apiError.type;
        // Build user-facing message with status code
        let userMessage = message;
        // Include status code in message (for end-user visibility)
        if (httpStatusCode) {
            userMessage = `[${httpStatusCode}] ${userMessage}`;
        }
        // Add error code if available and not already in message
        if (errorCode && !userMessage.includes(errorCode)) {
            userMessage += ` (code: ${errorCode})`;
        }
        // Add parameter info if available and helpful
        if (errorParam && !userMessage.toLowerCase().includes(errorParam.toLowerCase())) {
            userMessage += ` [param: ${errorParam}]`;
        }
        // Add request ID if available (useful for OpenAI support)
        if (apiError.requestID) {
            userMessage += ` (Request ID: ${apiError.requestID})`;
        }
        // Determine retryability based on OpenAI error types
        const retryable = this.isOpenAIErrorRetryable(error, httpStatusCode, errorCode, errorType);
        // Use the error constructor name as the error name
        const errorName = error.constructor?.name || 'OpenAIError';
        return new core_1.LlumiverseError(`[${context.provider}] ${userMessage}`, retryable, context, error, httpStatusCode, errorName);
    }
    /**
     * Type guard to check if error is an OpenAI API error or OpenAI-specific error.
     */
    isOpenAIApiError(error) {
        return (error !== null &&
            typeof error === 'object' &&
            (error instanceof error_1.APIError || error instanceof error_1.OpenAIError));
    }
    /**
     * Determine if an OpenAI API error is retryable.
     *
     * Retryable errors:
     * - RateLimitError (429): Rate limit exceeded, retry with backoff
     * - InternalServerError (500+): Server-side errors
     * - APIConnectionTimeoutError: Request timeout
     * - Error codes: 'timeout', 'server_error', 'service_unavailable'
     * - Status codes: 408, 429, 502, 503, 504, 529, 5xx
     *
     * Non-retryable errors:
     * - BadRequestError (400): Invalid request parameters
     * - AuthenticationError (401): Invalid API key
     * - PermissionDeniedError (403): Insufficient permissions
     * - NotFoundError (404): Resource not found
     * - ConflictError (409): Resource conflict
     * - UnprocessableEntityError (422): Validation error
     * - LengthFinishReasonError: Length limit reached
     * - ContentFilterFinishReasonError: Content filtered
     * - Error codes: 'invalid_api_key', 'invalid_request_error', 'model_not_found'
     * - Other 4xx client errors
     *
     * @param error - The error object
     * @param httpStatusCode - The HTTP status code if available
     * @param errorCode - The error code if available
     * @param errorType - The error type if available
     * @returns True if retryable, false if not retryable, undefined if unknown
     */
    isOpenAIErrorRetryable(error, httpStatusCode, errorCode, errorType) {
        // Check specific OpenAI error types by class
        if (error instanceof error_1.RateLimitError)
            return true;
        if (error instanceof error_1.InternalServerError)
            return true;
        if (error instanceof error_1.APIConnectionTimeoutError)
            return true;
        // Non-retryable by error type
        if (error instanceof error_1.BadRequestError)
            return false;
        if (error instanceof error_1.AuthenticationError)
            return false;
        if (error instanceof error_1.PermissionDeniedError)
            return false;
        if (error instanceof error_1.NotFoundError)
            return false;
        if (error instanceof error_1.ConflictError)
            return false;
        if (error instanceof error_1.UnprocessableEntityError)
            return false;
        if (error instanceof error_1.LengthFinishReasonError)
            return false;
        if (error instanceof error_1.ContentFilterFinishReasonError)
            return false;
        // Check error codes (OpenAI specific)
        if (errorCode) {
            // Retryable error codes
            if (errorCode === 'timeout')
                return true;
            if (errorCode === 'server_error')
                return true;
            if (errorCode === 'service_unavailable')
                return true;
            if (errorCode === 'rate_limit_exceeded')
                return true;
            // Non-retryable error codes
            if (errorCode === 'invalid_api_key')
                return false;
            if (errorCode === 'invalid_request_error')
                return false;
            if (errorCode === 'model_not_found')
                return false;
            if (errorCode === 'insufficient_quota')
                return false;
            if (errorCode === 'invalid_model')
                return false;
            if (errorCode.includes('invalid_'))
                return false;
        }
        // Check error type
        if (errorType === 'invalid_request_error')
            return false;
        if (errorType === 'authentication_error')
            return false;
        // Use HTTP status code
        if (httpStatusCode !== undefined) {
            if (httpStatusCode === 429)
                return true; // Rate limit
            if (httpStatusCode === 408)
                return true; // Request timeout
            if (httpStatusCode === 502)
                return true; // Bad gateway
            if (httpStatusCode === 503)
                return true; // Service unavailable
            if (httpStatusCode === 504)
                return true; // Gateway timeout
            if (httpStatusCode === 529)
                return true; // Overloaded
            if (httpStatusCode >= 500 && httpStatusCode < 600)
                return true; // Server errors
            if (httpStatusCode >= 400 && httpStatusCode < 500)
                return false; // Client errors
        }
        // Connection errors without status codes
        if (error instanceof error_1.APIConnectionError && !(error instanceof error_1.APIConnectionTimeoutError)) {
            // Generic connection errors might be retryable (network issues)
            return true;
        }
        // Unknown error type - let consumer decide retry strategy
        return undefined;
    }
}
exports.BaseOpenAIDriver = BaseOpenAIDriver;
function jobInfo(job) {
    //validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
    const jobStatus = job.status;
    let status = core_1.TrainingJobStatus.running;
    let details;
    if (jobStatus === 'succeeded') {
        status = core_1.TrainingJobStatus.succeeded;
    }
    else if (jobStatus === 'failed') {
        status = core_1.TrainingJobStatus.failed;
        details = job.error ? `${job.error.code} - ${job.error.message} ${job.error.param ? " [" + job.error.param + "]" : ""}` : "error";
    }
    else if (jobStatus === 'cancelled') {
        status = core_1.TrainingJobStatus.cancelled;
    }
    else {
        status = core_1.TrainingJobStatus.running;
        details = jobStatus;
    }
    return {
        id: job.id,
        model: job.fine_tuned_model || undefined,
        status,
        details
    };
}
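For illustration, a minimal polling sketch built on the training-job methods above: `jobInfo` normalizes the OpenAI fine-tuning status into `TrainingJobStatus` from `@llumiverse/core`, so a caller can loop on `getTrainingJob` until a terminal state. The concrete `OpenAIDriver` class and its construction are assumptions here, not confirmed by this diff.

```js
// Hedged sketch: assumes a concrete OpenAI driver instance whose
// getTrainingJob() returns the { id, model, status, details } shape
// produced by jobInfo() above.
const { TrainingJobStatus } = require("@llumiverse/core");

async function waitForTrainingJob(driver, jobId, pollMs = 30000) {
    for (;;) {
        const job = await driver.getTrainingJob(jobId);
        if (job.status === TrainingJobStatus.succeeded) return job;
        if (job.status === TrainingJobStatus.failed || job.status === TrainingJobStatus.cancelled) {
            // details carries the provider error string built by jobInfo()
            throw new Error(`Training ${job.status}: ${job.details ?? "no details"}`);
        }
        await new Promise((resolve) => setTimeout(resolve, pollMs)); // poll interval
    }
}
```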
function mapUsage(usage) {
    if (!usage) {
        return undefined;
    }
    return {
        prompt: usage.input_tokens,
        result: usage.output_tokens,
        total: usage.total_tokens,
    };
}
function completionResultsToText(completionResults) {
    if (!completionResults) {
        return '';
    }
    return completionResults
        .map(r => {
            switch (r.type) {
                case 'text':
                    return r.value;
                case 'json':
                    return typeof r.value === 'string' ? r.value : JSON.stringify(r.value);
                case 'image':
                    // Skip images in conversation - they're in the result
                    return '';
                default:
                    return String(r.value || '');
            }
        })
        .join('');
}
function createAssistantMessageFromCompletion(completion) {
    const textContent = completionResultsToText(completion.result);
    const result = [];
    // Add assistant text message if present
    if (textContent) {
        const assistantMessage = {
            role: 'assistant',
            content: textContent,
        };
        result.push(assistantMessage);
    }
    // Add function calls as separate items (Response API format)
    if (completion.tool_use && completion.tool_use.length > 0) {
        for (const t of completion.tool_use) {
            const functionCall = {
                type: 'function_call',
                call_id: t.id,
                name: t.tool_name,
                arguments: typeof t.tool_input === 'string'
                    ? t.tool_input
                    : JSON.stringify(t.tool_input ?? {}),
            };
            result.push(functionCall);
        }
    }
    return result;
}
function mapResponseStream(stream) {
    const toolCallMetadata = new Map();
    return {
        async *[Symbol.asyncIterator]() {
            for await (const event of stream) {
                if (event.type === 'response.output_item.added' && event.item.type === 'function_call') {
                    const syntheticId = `tool_${event.output_index}`;
                    const actualId = event.item.id ?? event.item.call_id;
                    if (actualId) {
                        toolCallMetadata.set(actualId, { syntheticId, name: event.item.name });
                    }
                    const toolUse = {
                        id: syntheticId,
                        _actual_id: actualId,
                        tool_name: event.item.name,
                        tool_input: '',
                    };
                    yield {
                        result: [],
                        tool_use: [toolUse],
                    };
                }
                else if (event.type === 'response.function_call_arguments.delta') {
                    const metadata = toolCallMetadata.get(event.item_id);
                    const syntheticId = metadata?.syntheticId ?? `tool_${event.output_index}`;
                    const toolUse = {
                        id: syntheticId,
                        _actual_id: event.item_id,
                        tool_name: metadata?.name ?? '',
                        tool_input: event.delta,
                    };
                    yield {
                        result: [],
                        tool_use: [toolUse],
                    };
                }
                // Note: We don't emit response.function_call_arguments.done because the arguments were already
                // streamed via delta events. Emitting it again would duplicate the tool_input content.
                // We only update the metadata to ensure the tool name is captured.
                else if (event.type === 'response.function_call_arguments.done') {
                    // Just update metadata, don't yield (arguments already accumulated from delta events)
                    const metadata = toolCallMetadata.get(event.item_id);
                    const syntheticId = metadata?.syntheticId ?? `tool_${event.output_index}`;
                    const tool_name = metadata?.name ?? event.name ?? '';
                    if (event.item_id) {
                        toolCallMetadata.set(event.item_id, { syntheticId, name: tool_name });
                    }
                }
                else if (event.type === 'response.output_text.delta') {
                    yield {
                        result: textToCompletionResult(event.delta),
                    };
                }
                // Note: We don't emit response.output_text.done because the text was already
                // streamed via delta events. Emitting it again would duplicate the content.
                else if (event.type === 'response.completed' || event.type === 'response.incomplete' || event.type === 'response.failed') {
                    const finalTools = collectTools(event.response.output);
                    yield {
                        result: [],
                        finish_reason: responseFinishReason(event.response, finalTools),
                        token_usage: mapUsage(event.response.usage),
                    };
                }
            }
        }
    };
}
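A minimal consumption sketch for the stream returned by `requestTextCompletionStream`, which wraps the SDK stream with `mapResponseStream` above. The chunk shape `{ result, tool_use, finish_reason, token_usage }` follows that mapping; the driver class name and constructor options below are assumptions for illustration.

```js
// Hedged sketch, assuming a concrete OpenAI driver is exported by the package.
const { OpenAIDriver } = require("@llumiverse/drivers");

async function streamText() {
    const driver = new OpenAIDriver({ apiKey: process.env.OPENAI_API_KEY }); // hypothetical options
    const stream = await driver.requestTextCompletionStream(
        [{ role: "user", content: "Write a haiku about package diffs." }],
        { model: "gpt-4o", model_options: { _option_id: "openai-text" } }
    );
    let text = "";
    for await (const chunk of stream) {
        for (const part of chunk.result) {
            if (part.type === "text") text += part.value; // output_text deltas
        }
        if (chunk.tool_use) console.log("tool call fragment:", chunk.tool_use);
        if (chunk.finish_reason) console.log(chunk.finish_reason, chunk.token_usage);
    }
    return text;
}
```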
function insert_image_detail(items, detail_level) {
    if (detail_level === "auto" || detail_level === "low" || detail_level === "high") {
        for (const item of items) {
            // Check if it's an EasyInputMessage or Message with content array
            if ('role' in item && 'content' in item && item.role !== 'assistant') {
                const content = item.content;
                if (Array.isArray(content)) {
                    for (const part of content) {
                        if (typeof part === 'object' && part.type === 'input_image') {
                            part.detail = detail_level;
                        }
                    }
                }
            }
        }
    }
    return items;
}
function convertRoles(items, model) {
    //New openai models use developer role instead of system
    if (model.includes("o1") || model.includes("o3")) {
        if (model.includes("o1-mini") || model.includes("o1-preview")) {
            //o1-mini and o1-preview support neither system nor developer
            for (const item of items) {
                if ('role' in item && item.role === 'system') {
                    item.role = 'user';
                }
            }
        }
        else {
            //Models newer than o1 use developer role
            for (const item of items) {
                if ('role' in item && item.role === 'system') {
                    item.role = 'developer';
                }
            }
        }
    }
    return items;
}
//Structured output support is typically aligned with tool use support
//Not true for realtime models, which do not support structured output, but do support tool use.
function supportsSchema(model) {
    const realtimeModel = model.includes("realtime");
    if (realtimeModel) {
        return false;
    }
    return (0, core_1.supportsToolUse)(model, "openai");
}
/**
 * Converts function_call and function_call_output items to text messages in OpenAI conversation.
 * Preserves tool call information while removing structured items that require
 * tools to be defined in the API request.
 */
function convertOpenAIFunctionItemsToText(items) {
    const hasFunctionItems = items.some(item => {
        const type = item.type;
        return type === 'function_call' || type === 'function_call_output';
    });
    if (!hasFunctionItems)
        return items;
    return items.map(item => {
        const typed = item;
        if (typed.type === 'function_call') {
            const argsStr = typed.arguments || '';
            const truncated = argsStr.length > 500 ? argsStr.substring(0, 500) + '...' : argsStr;
            return {
                role: 'assistant',
                content: `[Tool call: ${typed.name}(${truncated})]`,
            };
        }
        if (typed.type === 'function_call_output') {
            const output = typed.output || 'No output';
            const truncated = output.length > 500 ? output.substring(0, 500) + '...' : output;
            return {
                role: 'user',
                content: `[Tool result: ${truncated}]`,
            };
        }
        return item;
    });
}
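A small sketch of what the exported `convertOpenAIFunctionItemsToText` does to a conversation when tools are not sent with the request. The item shapes follow the OpenAI Responses API; the require path is an assumption for illustration.

```js
// Hedged sketch: require path assumed, not confirmed by this diff.
const { convertOpenAIFunctionItemsToText } = require("@llumiverse/drivers/lib/cjs/openai/index.js");

const items = [
    { role: "user", content: "What is the weather?" },
    { type: "function_call", call_id: "call_1", name: "get_weather", arguments: '{"city":"Paris"}' },
    { type: "function_call_output", call_id: "call_1", output: '{"temp_c":21}' },
];
console.log(convertOpenAIFunctionItemsToText(items));
// -> [ { role: 'user', content: 'What is the weather?' },
//      { role: 'assistant', content: '[Tool call: get_weather({"city":"Paris"})]' },
//      { role: 'user', content: '[Tool result: {"temp_c":21}]' } ]
// Arguments and outputs longer than 500 characters are truncated with '...'.
```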
function getToolDefinitions(tools) {
    return tools ? tools.map(getToolDefinition) : undefined;
}
function getToolDefinition(toolDef) {
    let parsedSchema = undefined;
    let strictMode = false;
    if (toolDef.input_schema) {
        try {
            //TODO: type assertion here is not safe, does not work with satisfies
            parsedSchema = openAISchemaFormat(toolDef.input_schema);
            strictMode = true;
        }
        catch (e) {
            //TODO: type assertion here is not safe, does not work with satisfies
            parsedSchema = limitedSchemaFormat(toolDef.input_schema);
            strictMode = false;
        }
    }
    return {
        type: "function",
        name: toolDef.name,
        description: toolDef.description,
        parameters: parsedSchema ?? null,
        strict: strictMode,
    };
}
function updateConversation(conversation, items) {
    if (!items) {
        // Unwrap array if wrapped, otherwise treat as array
        const unwrapped = (0, core_1.unwrapConversationArray)(conversation);
        return unwrapped ?? (conversation || []);
    }
    if (!conversation) {
        return items;
    }
    // Unwrap array if wrapped, otherwise treat as array
    const unwrapped = (0, core_1.unwrapConversationArray)(conversation);
    const convArray = unwrapped ?? conversation;
    return [...convArray, ...items];
}
function collectTools(output) {
    if (!output) {
        return undefined;
    }
    const tools = [];
    for (const item of output) {
        if (item.type === 'function_call') {
            const id = item.call_id || item.id;
            if (!id) {
                continue;
            }
            tools.push({
                id,
                tool_name: item.name ?? '',
                tool_input: safeJsonParse(item.arguments),
            });
        }
    }
    return tools.length > 0 ? tools : undefined;
}
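A usage sketch for the exported `collectTools`, which maps Responses API `function_call` output items to the `tool_use` shape surfaced in completions. The require path is an assumption for illustration.

```js
// Hedged sketch: require path assumed, not confirmed by this diff.
const { collectTools } = require("@llumiverse/drivers/lib/cjs/openai/index.js");

const output = [
    { type: "message", content: [{ type: "output_text", text: "Checking..." }] },
    { type: "function_call", id: "fc_abc", call_id: "call_42", name: "search", arguments: '{"query":"llumiverse"}' },
];
console.log(collectTools(output));
// -> [ { id: 'call_42', tool_name: 'search', tool_input: { query: 'llumiverse' } } ]
// call_id wins over id, and arguments go through safeJsonParse (falling back
// to the raw string when the JSON is malformed). With no function_call items,
// the function returns undefined rather than an empty array.
```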
/**
 * Collect all parts (text and images) from response output in order.
 * This preserves the original ordering of text and image parts.
 */
function extractCompletionResults(output) {
    if (!output) {
        return [];
    }
    const results = [];
    for (const item of output) {
        if (item.type === 'message') {
            // Extract text from message content
            for (const part of item.content) {
                if (part.type === 'output_text' && part.text) {
                    results.push({
                        type: "text",
                        value: part.text
                    });
                }
            }
        }
        else if (item.type === 'image_generation_call' && 'result' in item && item.result) {
            // GPT-image models return base64 encoded images in result field
            const base64Data = item.result;
            // Format as data URL for consistency with other image outputs
            const imageUrl = base64Data.startsWith('data:')
                ? base64Data
                : `data:image/png;base64,${base64Data}`;
            results.push({
                type: "image",
                value: imageUrl
            });
        }
    }
    return results;
}
//For strict mode false
function limitedSchemaFormat(schema) {
    const formattedSchema = { ...schema };
    // Defaults not supported
    delete formattedSchema.default;
    // OpenAI requires type field even in non-strict mode
    // If no type is specified, default to 'object' for properties with format/editor hints,
    // otherwise 'string' as a safe fallback
    if (!formattedSchema.type && formattedSchema.description) {
        // Properties with format: "document" or editor hints are typically objects
        if (formattedSchema.format === 'document' || formattedSchema.editor) {
            formattedSchema.type = 'object';
        }
        else {
            formattedSchema.type = 'string';
        }
    }
    if (formattedSchema?.properties) {
        // Process each property recursively
        for (const propName of Object.keys(formattedSchema.properties)) {
            const property = formattedSchema.properties[propName];
            // Recursively process properties
            formattedSchema.properties[propName] = limitedSchemaFormat(property);
            // Process arrays with items of type object
            if (property?.type === 'array' && property.items && property.items?.type === 'object') {
                formattedSchema.properties[propName] = {
                    ...property,
                    items: limitedSchemaFormat(property.items),
                };
            }
        }
    }
    return formattedSchema;
}
//For strict mode true
function openAISchemaFormat(schema, nesting = 0) {
    if (nesting > 5) {
        throw new Error("OpenAI schema nesting too deep");
    }
    const formattedSchema = { ...schema };
    // Defaults not supported
    delete formattedSchema.default;
    // Additional properties not supported, required to be set.
    if (formattedSchema?.type === "object") {
        formattedSchema.additionalProperties = false;
    }
    if (formattedSchema?.properties) {
        // Set all properties as required
        formattedSchema.required = Object.keys(formattedSchema.properties);
        // Process each property recursively
        for (const propName of Object.keys(formattedSchema.properties)) {
            const property = formattedSchema.properties[propName];
            // OpenAI strict mode requires all properties to have a type
            if (!property?.type) {
                throw new Error(`Property '${propName}' is missing required 'type' field for OpenAI strict mode`);
            }
            // Recursively process properties
            formattedSchema.properties[propName] = openAISchemaFormat(property, nesting + 1);
            // Process arrays with items of type object
            if (property?.type === 'array' && property.items && property.items?.type === 'object') {
                formattedSchema.properties[propName] = {
                    ...property,
                    items: openAISchemaFormat(property.items, nesting + 1),
                };
            }
        }
    }
    if (formattedSchema?.type === 'object' && (!formattedSchema?.properties || Object.keys(formattedSchema?.properties ?? {}).length == 0)) {
        //If no properties are defined, then additionalProperties: true was set or the object would be empty.
        //OpenAI does not support this on structured output/ strict mode.
        throw new Error("OpenAI does not support empty objects or objects with additionalProperties set to true");
    }
    return formattedSchema;
}
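An illustrative sketch (not part of the package) of the strict-mode rewrite `openAISchemaFormat` performs: every object gets `additionalProperties: false`, all properties become required, `default` is dropped, and a property without a `type` throws so the callers above fall back to `limitedSchemaFormat` with `strict: false`.

```js
// Hedged example: input schema and the expected transformed shape, derived
// from the logic above (the function itself is module-private).
const input = {
    type: "object",
    properties: {
        city: { type: "string", default: "Paris" },
        days: { type: "number" },
    },
};
// Expected result of openAISchemaFormat(input):
// {
//     type: "object",
//     additionalProperties: false,
//     required: ["city", "days"],
//     properties: {
//         city: { type: "string" },   // default removed at each recursion level
//         days: { type: "number" },
//     },
// }
// An empty object schema, or one deeper than 5 levels, throws instead.
```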
function responseFinishReason(response, tools) {
    if (tools && tools.length > 0) {
        return "tool_use";
    }
    if (response.status === 'incomplete') {
        if (response.incomplete_details?.reason === 'max_output_tokens') {
            return 'length';
        }
        return response.incomplete_details?.reason ?? 'incomplete';
    }
    if (response.status && response.status !== 'completed') {
        return response.status;
    }
    return 'stop';
}
/**
 * Fix orphaned function_call items in the OpenAI Responses API conversation.
 *
 * When an agent is stopped mid-tool-execution, the conversation may contain
 * function_call items without matching function_call_output items. The OpenAI
 * Responses API requires every function_call to have a matching function_call_output.
 *
 * This function detects such cases and injects synthetic function_call_output items
 * indicating the tools were interrupted, allowing the conversation to continue.
 */
function fixOrphanedToolUse(items) {
    if (items.length < 2)
        return items;
    // First pass: collect all function_call_output call_ids
    const outputCallIds = new Set();
    for (const item of items) {
        if ('type' in item && item.type === 'function_call_output') {
            outputCallIds.add(item.call_id);
        }
    }
    // Second pass: build result, injecting synthetic outputs for orphaned function_calls
    const result = [];
    const pendingCalls = new Map(); // call_id -> tool name
    for (const item of items) {
        if ('type' in item && item.type === 'function_call') {
            const fc = item;
            // Only track if there's no matching output anywhere in the conversation
            if (!outputCallIds.has(fc.call_id)) {
                pendingCalls.set(fc.call_id, fc.name ?? 'unknown');
            }
            result.push(item);
        }
        else if ('type' in item && item.type === 'function_call_output') {
            result.push(item);
        }
        else {
            // Before any non-function item, flush pending orphaned calls
            if (pendingCalls.size > 0) {
                for (const [callId, toolName] of pendingCalls) {
                    result.push({
                        type: 'function_call_output',
                        call_id: callId,
                        output: `[Tool interrupted: The user stopped the operation before "${toolName}" could execute.]`,
                    });
                }
                pendingCalls.clear();
            }
            result.push(item);
        }
    }
    // Handle trailing orphans at the end of the conversation
    if (pendingCalls.size > 0) {
        for (const [callId, toolName] of pendingCalls) {
            result.push({
                type: 'function_call_output',
                call_id: callId,
                output: `[Tool interrupted: The user stopped the operation before "${toolName}" could execute.]`,
            });
        }
    }
    return result;
}
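A usage sketch of the exported `fixOrphanedToolUse` repair: an interrupted `function_call` with no matching output gets a synthetic `function_call_output` injected before the next non-function item (or at the end of the list). The require path is an assumption for illustration.

```js
// Hedged sketch: require path assumed, not confirmed by this diff.
const { fixOrphanedToolUse } = require("@llumiverse/drivers/lib/cjs/openai/index.js");

const conversation = [
    { role: "user", content: "Delete the temp files" },
    { type: "function_call", call_id: "call_9", name: "delete_files", arguments: "{}" },
    { role: "user", content: "Actually, stop." },
];
console.log(fixOrphanedToolUse(conversation));
// The synthetic item is inserted before the trailing user message:
// [ { role: 'user', content: 'Delete the temp files' },
//   { type: 'function_call', call_id: 'call_9', name: 'delete_files', arguments: '{}' },
//   { type: 'function_call_output', call_id: 'call_9',
//     output: '[Tool interrupted: The user stopped the operation before "delete_files" could execute.]' },
//   { role: 'user', content: 'Actually, stop.' } ]
```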
function safeJsonParse(value) {
    if (typeof value !== 'string') {
        return value;
    }
    try {
        return JSON.parse(value);
    }
    catch {
        return value;
    }
}
//# sourceMappingURL=index.js.map