@aigne/openai 0.16.16 → 1.74.0-beta

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. package/README.md +11 -11
  2. package/dist/_virtual/rolldown_runtime.cjs +29 -0
  3. package/dist/index.cjs +10 -0
  4. package/dist/index.d.cts +4 -0
  5. package/dist/index.d.mts +4 -0
  6. package/dist/index.mjs +5 -0
  7. package/dist/openai-chat-model.cjs +371 -0
  8. package/dist/openai-chat-model.d.cts +165 -0
  9. package/dist/openai-chat-model.d.cts.map +1 -0
  10. package/dist/openai-chat-model.d.mts +165 -0
  11. package/dist/openai-chat-model.d.mts.map +1 -0
  12. package/dist/openai-chat-model.mjs +368 -0
  13. package/dist/openai-chat-model.mjs.map +1 -0
  14. package/dist/openai-image-model.cjs +123 -0
  15. package/dist/openai-image-model.d.cts +57 -0
  16. package/dist/openai-image-model.d.cts.map +1 -0
  17. package/dist/openai-image-model.d.mts +57 -0
  18. package/dist/openai-image-model.d.mts.map +1 -0
  19. package/dist/openai-image-model.mjs +123 -0
  20. package/dist/openai-image-model.mjs.map +1 -0
  21. package/dist/openai-video-model.cjs +112 -0
  22. package/dist/openai-video-model.d.cts +95 -0
  23. package/dist/openai-video-model.d.cts.map +1 -0
  24. package/dist/openai-video-model.d.mts +95 -0
  25. package/dist/openai-video-model.d.mts.map +1 -0
  26. package/dist/openai-video-model.mjs +112 -0
  27. package/dist/openai-video-model.mjs.map +1 -0
  28. package/dist/openai.cjs +14 -0
  29. package/dist/openai.mjs +13 -0
  30. package/dist/openai.mjs.map +1 -0
  31. package/package.json +29 -30
  32. package/CHANGELOG.md +0 -2448
  33. package/lib/cjs/index.d.ts +0 -3
  34. package/lib/cjs/index.js +0 -19
  35. package/lib/cjs/openai-chat-model.d.ts +0 -160
  36. package/lib/cjs/openai-chat-model.js +0 -465
  37. package/lib/cjs/openai-image-model.d.ts +0 -55
  38. package/lib/cjs/openai-image-model.js +0 -110
  39. package/lib/cjs/openai-video-model.d.ts +0 -92
  40. package/lib/cjs/openai-video-model.js +0 -118
  41. package/lib/cjs/openai.d.ts +0 -4
  42. package/lib/cjs/openai.js +0 -17
  43. package/lib/cjs/package.json +0 -3
  44. package/lib/dts/index.d.ts +0 -3
  45. package/lib/dts/openai-chat-model.d.ts +0 -160
  46. package/lib/dts/openai-image-model.d.ts +0 -55
  47. package/lib/dts/openai-video-model.d.ts +0 -92
  48. package/lib/dts/openai.d.ts +0 -4
  49. package/lib/esm/index.d.ts +0 -3
  50. package/lib/esm/index.js +0 -3
  51. package/lib/esm/openai-chat-model.d.ts +0 -160
  52. package/lib/esm/openai-chat-model.js +0 -459
  53. package/lib/esm/openai-image-model.d.ts +0 -55
  54. package/lib/esm/openai-image-model.js +0 -106
  55. package/lib/esm/openai-video-model.d.ts +0 -92
  56. package/lib/esm/openai-video-model.js +0 -114
  57. package/lib/esm/openai.d.ts +0 -4
  58. package/lib/esm/openai.js +0 -10
  59. package/lib/esm/package.json +0 -3
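
The removals under package/lib/ and the additions under package/dist/ show this release replacing the separate cjs/esm/dts build trees with a single rolldown bundle that ships paired .mjs/.d.mts and .cjs/.d.cts files. A minimal sketch of what the dual layout means for consumers, assuming the exports map in package.json (changed above but not expanded in this view) wires both formats to dist/index.*:

  // ESM consumers resolve dist/index.mjs, typed by dist/index.d.mts
  import { OpenAIChatModel } from "@aigne/openai";

  // CommonJS consumers resolve dist/index.cjs, typed by dist/index.d.cts
  // const { OpenAIChatModel } = require("@aigne/openai");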
package/dist/openai-chat-model.d.mts
@@ -0,0 +1,165 @@
+ import { AgentInvokeOptions, AgentProcessResult, ChatModel, ChatModelInput, ChatModelInputMessage, ChatModelInputTool, ChatModelOptions, ChatModelOutput } from "@aigne/core";
+ import { PromiseOrValue } from "@aigne/core/utils/type-utils";
+ import { z } from "zod";
+ import { ClientOptions, OpenAI as OpenAI$1 } from "openai";
+ import { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
+
+ //#region src/openai-chat-model.d.ts
+ interface OpenAIChatModelCapabilities {
+ supportsNativeStructuredOutputs: boolean;
+ supportsEndWithSystemMessage: boolean;
+ supportsToolsUseWithJsonSchema: boolean;
+ supportsParallelToolCalls: boolean;
+ supportsToolsEmptyParameters: boolean;
+ supportsToolStreaming: boolean;
+ supportsTemperature: boolean;
+ }
+ /**
+ * Configuration options for OpenAI Chat Model
+ */
+ interface OpenAIChatModelOptions extends ChatModelOptions {
+ /**
+ * API key for OpenAI API
+ *
+ * If not provided, will look for OPENAI_API_KEY in environment variables
+ */
+ apiKey?: string;
+ /**
+ * Base URL for OpenAI API
+ *
+ * Useful for proxies or alternate endpoints
+ */
+ baseURL?: string;
+ /**
+ * Client options for OpenAI API
+ */
+ clientOptions?: Partial<ClientOptions>;
+ }
+ /**
+ * @hidden
+ */
+ declare const openAIChatModelOptionsSchema: z.ZodObject<{
+ apiKey: z.ZodOptional<z.ZodString>;
+ baseURL: z.ZodOptional<z.ZodString>;
+ model: z.ZodOptional<z.ZodString>;
+ modelOptions: z.ZodOptional<z.ZodObject<{
+ model: z.ZodOptional<z.ZodString>;
+ temperature: z.ZodOptional<z.ZodNumber>;
+ topP: z.ZodOptional<z.ZodNumber>;
+ frequencyPenalty: z.ZodOptional<z.ZodNumber>;
+ presencePenalty: z.ZodOptional<z.ZodNumber>;
+ parallelToolCalls: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
+ }, "strip", z.ZodTypeAny, {
+ parallelToolCalls: boolean;
+ model?: string | undefined;
+ temperature?: number | undefined;
+ topP?: number | undefined;
+ frequencyPenalty?: number | undefined;
+ presencePenalty?: number | undefined;
+ }, {
+ model?: string | undefined;
+ temperature?: number | undefined;
+ topP?: number | undefined;
+ frequencyPenalty?: number | undefined;
+ presencePenalty?: number | undefined;
+ parallelToolCalls?: boolean | undefined;
+ }>>;
+ }, "strip", z.ZodTypeAny, {
+ baseURL?: string | undefined;
+ apiKey?: string | undefined;
+ model?: string | undefined;
+ modelOptions?: {
+ parallelToolCalls: boolean;
+ model?: string | undefined;
+ temperature?: number | undefined;
+ topP?: number | undefined;
+ frequencyPenalty?: number | undefined;
+ presencePenalty?: number | undefined;
+ } | undefined;
+ }, {
+ baseURL?: string | undefined;
+ apiKey?: string | undefined;
+ model?: string | undefined;
+ modelOptions?: {
+ model?: string | undefined;
+ temperature?: number | undefined;
+ topP?: number | undefined;
+ frequencyPenalty?: number | undefined;
+ presencePenalty?: number | undefined;
+ parallelToolCalls?: boolean | undefined;
+ } | undefined;
+ }>;
+ /**
+ * Implementation of the ChatModel interface for OpenAI's API
+ *
+ * This model provides access to OpenAI's capabilities including:
+ * - Text generation
+ * - Tool use with parallel tool calls
+ * - JSON structured output
+ * - Image understanding
+ *
+ * Default model: 'gpt-4o-mini'
+ *
+ * @example
+ * Here's how to create and use an OpenAI chat model:
+ * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}
+ *
+ * @example
+ * Here's an example with streaming response:
+ * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}
+ */
+ declare class OpenAIChatModel extends ChatModel {
+ options?: OpenAIChatModelOptions | undefined;
+ constructor(options?: OpenAIChatModelOptions | undefined);
+ /**
+ * @hidden
+ */
+ protected _client?: OpenAI$1;
+ protected apiKeyEnvName: string;
+ protected apiKeyDefault: string | undefined;
+ protected supportsNativeStructuredOutputs: boolean;
+ protected supportsToolsUseWithJsonSchema: boolean;
+ protected supportsParallelToolCalls: boolean;
+ protected supportsToolsEmptyParameters: boolean;
+ protected supportsToolStreaming: boolean;
+ protected supportsTemperature: boolean;
+ get client(): OpenAI$1;
+ get credential(): {
+ url: string | undefined;
+ apiKey: string | undefined;
+ model: string;
+ };
+ /**
+ * Process the input and generate a response
+ * @param input The input to process
+ * @returns The generated response
+ */
+ process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
+ private getReasoningEffort;
+ private _process;
+ private getParallelToolCalls;
+ protected getRunMessages(input: ChatModelInput): Promise<ChatCompletionMessageParam[]>;
+ private getRunResponseFormat;
+ private requestStructuredOutput;
+ private extractResultFromStream;
+ /**
+ * Controls how optional fields are handled in JSON schema conversion
+ * - "anyOf": All fields are required but can be null (default)
+ * - "optional": Fields marked as optional in schema remain optional
+ */
+ protected optionalFieldMode?: "anyOf" | "optional";
+ protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown>;
+ }
+ /**
+ * @hidden
+ */
+ declare function contentsFromInputMessages(messages: ChatModelInputMessage[]): Promise<ChatCompletionMessageParam[]>;
+ /**
+ * @hidden
+ */
+ declare function toolsFromInputTools(tools?: ChatModelInputTool[], options?: {
+ addTypeToEmptyParameters?: boolean;
+ }): ChatCompletionTool[] | undefined;
+ //#endregion
+ export { OpenAIChatModel, OpenAIChatModelCapabilities, OpenAIChatModelOptions, contentsFromInputMessages, openAIChatModelOptionsSchema, toolsFromInputTools };
+ //# sourceMappingURL=openai-chat-model.d.mts.map
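
The declarations above are the public surface of the rebuilt chat model. A minimal usage sketch against that surface; invoke is inherited from the Agent base class in @aigne/core and the message shape follows ChatModelInput, so treat the exact call below as an assumption rather than the package's documented example (the real ones are in the {@includeCode} test references above):

  import { OpenAIChatModel } from "@aigne/openai";

  // apiKey falls back to the OPENAI_API_KEY environment variable when omitted
  const model = new OpenAIChatModel({
    model: "gpt-4o-mini",
    modelOptions: { temperature: 0.2, parallelToolCalls: true },
  });

  // invoke() drives process() and collapses the streamed chunks into a ChatModelOutput
  const output = await model.invoke({
    messages: [{ role: "user", content: "Say hello in one sentence." }],
  });
  console.log(output.text);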
package/dist/openai-chat-model.d.mts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"openai-chat-model.d.mts","names":[],"sources":["../src/openai-chat-model.ts"],"mappings":";;;;;;;UAoCiB,2BAAA;EAAA,+BAAA;EAAA,4BAAA;EAAA,8BAAA;EAAA,yBAAA;EAAA,4BAAA;EAAA,qBAAA;EAAA,mBAAA;AAAA;AAAA;AAkBjB;;AAlBiB,UAkBA,sBAAA,SAA+B,gBAAA;EAAA;;AAwBhD;;;EAxBgD,MAAA;EAAA;;AAwBhD;;;EAxBgD,OAAA;EAAA;;AAwBhD;EAxBgD,aAAA,GAkB9B,OAAA,CAAQ,aAAA;AAAA;AAAA;;;AAAA,cAMb,4BAAA,EAA4B,CAAA,CAAA,SAAA;EAAA,MAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCzC;;;;;;;;;;;;;;;;cAAa,eAAA,SAAwB,SAAA;EAAA,OAAA,GACG,sBAAA;EAAA,YAAA,OAAA,GAAA,sBAAA;EAAA;;;EAAA,UAAA,OAAA,GAWlB,QAAA;EAAA,UAAA,aAAA;EAAA,UAAA,aAAA;EAAA,UAAA,+BAAA;EAAA,UAAA,8BAAA;EAAA,UAAA,yBAAA;EAAA,UAAA,4BAAA;EAAA,UAAA,qBAAA;EAAA,UAAA,mBAAA;EAAA,IAAA,OAAA,GAWV,QAAA;EAAA,IAAA,WAAA;IAAA,GAAA;IAAA,MAAA;IAAA,KAAA;EAAA;EAAA;;;;;EAAA,QAAA,KAAA,EA6BD,cAAA,EAAA,OAAA,EACE,kBAAA,GACR,cAAA,CAAe,kBAAA,CAAmB,eAAA;EAAA,QAAA,kBAAA;EAAA,QAAA,QAAA;EAAA,QAAA,oBAAA;EAAA,UAAA,eAAA,KAAA,EAkGC,cAAA,GAAiB,OAAA,CAAQ,0BAAA;EAAA,QAAA,oBAAA;EAAA,QAAA,uBAAA;EAAA,QAAA,uBAAA;EAAA;;;;;EAAA,UAAA,iBAAA;EAAA,UAAA,6BAAA,MAAA,EAyNhB,MAAA,oBAA0B,MAAA;AAAA;AAAA;;;AAAA,iBA+CrD,yBAAA,CAAA,QAAA,EACV,qBAAA,KACT,OAAA,CAAQ,0BAAA;AAAA;;;AAAA,iBAmDK,mBAAA,CAAA,KAAA,GACN,kBAAA,IAAA,OAAA;EAAA,wBAAA;AAAA,IAEP,kBAAA"}
package/dist/openai-chat-model.mjs
@@ -0,0 +1,368 @@
+ import { CustomOpenAI } from "./openai.mjs";
+ import { ChatModel, STANDARD_ROLE_MAP, createRoleMapper, safeParseJSON } from "@aigne/core";
+ import { logger } from "@aigne/core/utils/logger";
+ import { mergeUsage } from "@aigne/core/utils/model-utils";
+ import { getJsonOutputPrompt } from "@aigne/core/utils/prompts";
+ import { agentResponseStreamToObject } from "@aigne/core/utils/stream-utils";
+ import { checkArguments, isNonNullable } from "@aigne/core/utils/type-utils";
+ import { v7 } from "@aigne/uuid";
+ import { z } from "zod";
+
+ //#region src/openai-chat-model.ts
+ const CHAT_MODEL_OPENAI_DEFAULT_MODEL = "gpt-4o-mini";
+ const OPENAI_CHAT_MODEL_CAPABILITIES = {
+ "o4-mini": {
+ supportsParallelToolCalls: false,
+ supportsTemperature: false
+ },
+ "o3-mini": {
+ supportsParallelToolCalls: false,
+ supportsTemperature: false
+ }
+ };
+ /**
+ * @hidden
+ */
+ const openAIChatModelOptionsSchema = z.object({
+ apiKey: z.string().optional(),
+ baseURL: z.string().optional(),
+ model: z.string().optional(),
+ modelOptions: z.object({
+ model: z.string().optional(),
+ temperature: z.number().optional(),
+ topP: z.number().optional(),
+ frequencyPenalty: z.number().optional(),
+ presencePenalty: z.number().optional(),
+ parallelToolCalls: z.boolean().optional().default(true)
+ }).optional()
+ });
+ /**
+ * Implementation of the ChatModel interface for OpenAI's API
+ *
+ * This model provides access to OpenAI's capabilities including:
+ * - Text generation
+ * - Tool use with parallel tool calls
+ * - JSON structured output
+ * - Image understanding
+ *
+ * Default model: 'gpt-4o-mini'
+ *
+ * @example
+ * Here's how to create and use an OpenAI chat model:
+ * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}
+ *
+ * @example
+ * Here's an example with streaming response:
+ * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}
+ */
+ var OpenAIChatModel = class extends ChatModel {
+ constructor(options) {
+ super();
+ this.options = options;
+ if (options) checkArguments(this.name, openAIChatModelOptionsSchema, options);
+ const preset = options?.model ? OPENAI_CHAT_MODEL_CAPABILITIES[options.model] : void 0;
+ Object.assign(this, preset);
+ }
+ /**
+ * @hidden
+ */
+ _client;
+ apiKeyEnvName = "OPENAI_API_KEY";
+ apiKeyDefault;
+ supportsNativeStructuredOutputs = true;
+ supportsToolsUseWithJsonSchema = true;
+ supportsParallelToolCalls = true;
+ supportsToolsEmptyParameters = true;
+ supportsToolStreaming = true;
+ supportsTemperature = true;
+ get client() {
+ const { apiKey, url } = this.credential;
+ if (!apiKey) throw new Error(`${this.name} requires an API key. Please provide it via \`options.apiKey\`, or set the \`${this.apiKeyEnvName}\` environment variable`);
+ this._client ??= new CustomOpenAI({
+ baseURL: url,
+ apiKey,
+ ...this.options?.clientOptions
+ });
+ return this._client;
+ }
+ get credential() {
+ return {
+ url: this.options?.baseURL || process.env.OPENAI_BASE_URL,
+ apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName] || this.apiKeyDefault,
+ model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL
+ };
+ }
+ /**
+ * Process the input and generate a response
+ * @param input The input to process
+ * @returns The generated response
+ */
+ process(input, options) {
+ return this._process(input, options);
+ }
+ getReasoningEffort(effort) {
+ if (typeof effort === "number") {
+ if (effort > 5e3) return "high";
+ if (effort > 1e3) return "medium";
+ if (effort > 500) return "low";
+ if (effort > 0) return "minimal";
+ return;
+ }
+ return effort;
+ }
+ async _process(input, _options) {
+ const { modelOptions = {} } = input;
+ const messages = await this.getRunMessages(input);
+ const model = modelOptions?.model || this.credential.model;
+ const body = {
+ model,
+ temperature: this.supportsTemperature ? modelOptions.temperature : void 0,
+ top_p: modelOptions.topP,
+ frequency_penalty: modelOptions.frequencyPenalty,
+ presence_penalty: modelOptions.presencePenalty,
+ messages,
+ stream_options: { include_usage: true },
+ stream: true,
+ reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort)
+ };
+ if (model.includes("gpt-5") || model.includes("o1-")) {
+ delete body.temperature;
+ delete body.top_p;
+ }
+ if (!input.tools?.length && input.responseFormat?.type === "json_schema") return await this.requestStructuredOutput(body, input.responseFormat);
+ const { jsonMode, responseFormat } = await this.getRunResponseFormat(input);
+ const stream = await this.client.chat.completions.create({
+ ...body,
+ tools: toolsFromInputTools(input.tools, { addTypeToEmptyParameters: !this.supportsToolsEmptyParameters }),
+ tool_choice: input.toolChoice,
+ parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),
+ response_format: responseFormat
+ });
+ if (input.responseFormat?.type !== "json_schema") return await this.extractResultFromStream(body, stream, false, true);
+ const result = await this.extractResultFromStream(body, stream, jsonMode);
+ if (result.toolCalls?.length || result.json) return result;
+ const json = safeParseJSON(result.text || "");
+ const validated = this.validateJsonSchema(input.responseFormat.jsonSchema.schema, json, { safe: true });
+ if (validated.success) return {
+ ...result,
+ json: validated.data,
+ text: void 0
+ };
+ logger.warn(`${this.name}: Text response does not match JSON schema, trying to use tool to extract json `, { text: result.text });
+ const output = await this.requestStructuredOutput(body, input.responseFormat);
+ return {
+ ...output,
+ usage: mergeUsage(result.usage, output.usage)
+ };
+ }
+ getParallelToolCalls(input, modelOptions) {
+ if (!this.supportsParallelToolCalls) return void 0;
+ if (!input.tools?.length) return void 0;
+ return modelOptions.parallelToolCalls;
+ }
+ async getRunMessages(input) {
+ const messages = await contentsFromInputMessages(input.messages);
+ if (input.responseFormat?.type === "json_schema") {
+ if (!this.supportsNativeStructuredOutputs || !this.supportsToolsUseWithJsonSchema && input.tools?.length) messages.unshift({
+ role: "system",
+ content: getJsonOutputPrompt(input.responseFormat.jsonSchema.schema)
+ });
+ }
+ return messages;
+ }
+ async getRunResponseFormat(input) {
+ if (!this.supportsToolsUseWithJsonSchema && input.tools?.length) return {
+ jsonMode: false,
+ responseFormat: void 0
+ };
+ if (!this.supportsNativeStructuredOutputs) {
+ const jsonMode = input.responseFormat?.type === "json_schema";
+ return {
+ jsonMode,
+ responseFormat: jsonMode ? { type: "json_object" } : void 0
+ };
+ }
+ if (input.responseFormat?.type === "json_schema") return {
+ jsonMode: true,
+ responseFormat: {
+ type: "json_schema",
+ json_schema: {
+ ...input.responseFormat.jsonSchema,
+ schema: this.jsonSchemaToOpenAIJsonSchema(input.responseFormat.jsonSchema.schema)
+ }
+ }
+ };
+ return {
+ jsonMode: false,
+ responseFormat: void 0
+ };
+ }
+ async requestStructuredOutput(body, responseFormat) {
+ if (responseFormat?.type !== "json_schema") throw new Error("Expected json_schema response format");
+ const { jsonMode, responseFormat: resolvedResponseFormat } = await this.getRunResponseFormat({ responseFormat });
+ const res = await this.client.chat.completions.create({
+ ...body,
+ response_format: resolvedResponseFormat
+ });
+ return this.extractResultFromStream(body, res, jsonMode);
+ }
+ async extractResultFromStream(body, stream, jsonMode, streaming) {
+ const result = new ReadableStream({ start: async (controller) => {
+ try {
+ controller.enqueue({ delta: { json: { modelOptions: { reasoningEffort: body.reasoning_effort } } } });
+ let text = "";
+ let refusal = "";
+ const toolCalls = [];
+ let model;
+ for await (const chunk of stream) {
+ const delta = (chunk.choices?.[0])?.delta;
+ if (!model) {
+ model = chunk.model;
+ controller.enqueue({ delta: { json: { model } } });
+ }
+ if (delta?.tool_calls?.length) for (const call of delta.tool_calls) if (this.supportsToolStreaming && call.index !== void 0) handleToolCallDelta(toolCalls, call);
+ else handleCompleteToolCall(toolCalls, call);
+ if (delta && "reasoning" in delta && typeof delta.reasoning === "string") controller.enqueue({ delta: { text: { thoughts: delta.reasoning } } });
+ if (delta?.content) {
+ text += delta.content;
+ if (!jsonMode) controller.enqueue({ delta: { text: { text: delta.content } } });
+ }
+ if (delta?.refusal) refusal += delta.refusal;
+ if (chunk.usage) {
+ const usage = {
+ inputTokens: chunk.usage.prompt_tokens,
+ outputTokens: chunk.usage.completion_tokens
+ };
+ const inputDetails = chunk.usage.prompt_tokens_details;
+ if (inputDetails?.cached_tokens) usage.cacheReadInputTokens = inputDetails.cached_tokens;
+ controller.enqueue({ delta: { json: { usage } } });
+ }
+ }
+ if (jsonMode && text) controller.enqueue({ delta: { json: { json: safeParseJSON(text) } } });
+ if (toolCalls.length) controller.enqueue({ delta: { json: { toolCalls: toolCalls.map(({ args, ...c }) => ({
+ ...c,
+ function: {
+ ...c.function,
+ arguments: args ? safeParseJSON(args) : {}
+ }
+ })) } } });
+ if (refusal) controller.error(/* @__PURE__ */ new Error(`Got refusal from LLM: ${refusal}`));
+ controller.close();
+ } catch (error) {
+ controller.error(error);
+ }
+ } });
+ return streaming ? result : await agentResponseStreamToObject(result);
+ }
+ /**
+ * Controls how optional fields are handled in JSON schema conversion
+ * - "anyOf": All fields are required but can be null (default)
+ * - "optional": Fields marked as optional in schema remain optional
+ */
+ optionalFieldMode = "anyOf";
+ jsonSchemaToOpenAIJsonSchema(schema) {
+ if (schema?.type === "object") {
+ const s = schema;
+ const required = this.optionalFieldMode === "anyOf" ? Object.keys(s.properties) : s.required;
+ return {
+ ...schema,
+ properties: Object.fromEntries(Object.entries(s.properties).map(([key, value]) => {
+ const valueSchema = this.jsonSchemaToOpenAIJsonSchema(value);
+ return [key, this.optionalFieldMode === "optional" || s.required?.includes(key) ? valueSchema : { anyOf: [valueSchema, { type: ["null"] }] }];
+ })),
+ required
+ };
+ }
+ if (schema?.type === "array") {
+ const { items } = schema;
+ return {
+ ...schema,
+ items: this.jsonSchemaToOpenAIJsonSchema(items)
+ };
+ }
+ return schema;
+ }
+ };
+ const mapRole = createRoleMapper(STANDARD_ROLE_MAP);
+ /**
+ * @hidden
+ */
+ async function contentsFromInputMessages(messages) {
+ return Promise.all(messages.map(async (i) => ({
+ role: mapRole(i.role),
+ content: typeof i.content === "string" ? i.content : i.content && (await Promise.all(i.content.map(async (c) => {
+ switch (c.type) {
+ case "text": return {
+ type: "text",
+ text: c.text
+ };
+ case "url": return {
+ type: "image_url",
+ image_url: { url: c.url }
+ };
+ case "file": return {
+ type: "image_url",
+ image_url: { url: `data:${c.mimeType || "image/png"};base64,${c.data}` }
+ };
+ case "local": throw new Error(`Unsupported local file: ${c.path}, it should be converted to base64 at ChatModel`);
+ }
+ }))).filter(isNonNullable),
+ tool_calls: i.toolCalls?.map((i$1) => ({
+ ...i$1,
+ function: {
+ ...i$1.function,
+ arguments: JSON.stringify(i$1.function.arguments)
+ }
+ })),
+ tool_call_id: i.toolCallId,
+ name: i.name
+ })));
+ }
+ /**
+ * @hidden
+ */
+ function toolsFromInputTools(tools, options) {
+ return tools?.length ? tools.map((i) => {
+ const parameters = i.function.parameters;
+ if (options?.addTypeToEmptyParameters && Object.keys(parameters).length === 0) parameters.type = "object";
+ return {
+ type: "function",
+ function: {
+ name: i.function.name,
+ description: i.function.description,
+ parameters
+ }
+ };
+ }) : void 0;
+ }
+ function handleToolCallDelta(toolCalls, call) {
+ toolCalls[call.index] ??= {
+ id: call.id || v7(),
+ type: "function",
+ function: {
+ name: "",
+ arguments: {}
+ },
+ args: ""
+ };
+ const c = toolCalls[call.index];
+ if (!c) throw new Error("Tool call not found");
+ if (call.type) c.type = call.type;
+ c.function.name = c.function.name + (call.function?.name || "");
+ c.args = c.args.concat(call.function?.arguments || "");
+ }
+ function handleCompleteToolCall(toolCalls, call) {
+ toolCalls.push({
+ id: call.id || v7(),
+ type: "function",
+ function: {
+ name: call.function?.name || "",
+ arguments: safeParseJSON(call.function?.arguments || "{}")
+ },
+ args: call.function?.arguments || ""
+ });
+ }
+
+ //#endregion
+ export { OpenAIChatModel, contentsFromInputMessages, openAIChatModelOptionsSchema, toolsFromInputTools };
+ //# sourceMappingURL=openai-chat-model.mjs.map
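
The jsonSchemaToOpenAIJsonSchema conversion above is easiest to see with concrete values: OpenAI structured outputs require every property to appear in required, so in the default "anyOf" mode the implementation lists all keys as required and rewrites optional ones as nullable. A worked example with illustrative values:

  // Input: one required field, one optional field
  const schema = {
    type: "object",
    properties: {
      name: { type: "string" },
      nickname: { type: "string" },
    },
    required: ["name"],
  };

  // jsonSchemaToOpenAIJsonSchema(schema) in "anyOf" mode returns:
  // {
  //   type: "object",
  //   properties: {
  //     name: { type: "string" },
  //     nickname: { anyOf: [{ type: "string" }, { type: ["null"] }] },
  //   },
  //   required: ["name", "nickname"], // every key becomes required
  // }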
package/dist/openai-chat-model.mjs.map
@@ -0,0 +1 @@
+ {"version":3,"file":"openai-chat-model.mjs","names":["i"],"sources":["../src/openai-chat-model.ts"],"sourcesContent":["import {\n type AgentInvokeOptions,\n type AgentProcessResult,\n type AgentResponse,\n type AgentResponseChunk,\n ChatModel,\n type ChatModelInput,\n type ChatModelInputMessage,\n type ChatModelInputOptions,\n type ChatModelInputTool,\n type ChatModelOptions,\n type ChatModelOutput,\n type ChatModelOutputUsage,\n createRoleMapper,\n STANDARD_ROLE_MAP,\n safeParseJSON,\n} from \"@aigne/core\";\nimport { logger } from \"@aigne/core/utils/logger\";\nimport { mergeUsage } from \"@aigne/core/utils/model-utils\";\nimport { getJsonOutputPrompt } from \"@aigne/core/utils/prompts\";\nimport { agentResponseStreamToObject } from \"@aigne/core/utils/stream-utils\";\nimport { checkArguments, isNonNullable, type PromiseOrValue } from \"@aigne/core/utils/type-utils\";\nimport { v7 } from \"@aigne/uuid\";\nimport type { ClientOptions, OpenAI } from \"openai\";\nimport type {\n ChatCompletionContentPart,\n ChatCompletionMessageParam,\n ChatCompletionTool,\n ResponseFormatJSONSchema,\n} from \"openai/resources\";\nimport type { Stream } from \"openai/streaming.js\";\nimport { z } from \"zod\";\nimport { CustomOpenAI } from \"./openai.js\";\n\nconst CHAT_MODEL_OPENAI_DEFAULT_MODEL = \"gpt-4o-mini\";\n\nexport interface OpenAIChatModelCapabilities {\n supportsNativeStructuredOutputs: boolean;\n supportsEndWithSystemMessage: boolean;\n supportsToolsUseWithJsonSchema: boolean;\n supportsParallelToolCalls: boolean;\n supportsToolsEmptyParameters: boolean;\n supportsToolStreaming: boolean;\n supportsTemperature: boolean;\n}\n\nconst OPENAI_CHAT_MODEL_CAPABILITIES: Record<string, Partial<OpenAIChatModelCapabilities>> = {\n \"o4-mini\": { supportsParallelToolCalls: false, supportsTemperature: false },\n \"o3-mini\": { supportsParallelToolCalls: false, supportsTemperature: false },\n};\n\n/**\n * Configuration options for OpenAI Chat Model\n */\nexport interface OpenAIChatModelOptions extends ChatModelOptions {\n /**\n * API key for OpenAI API\n *\n * If not provided, will look for OPENAI_API_KEY in environment variables\n */\n apiKey?: string;\n\n /**\n * Base URL for OpenAI API\n *\n * Useful for proxies or alternate endpoints\n */\n baseURL?: string;\n\n /**\n * Client options for OpenAI API\n */\n clientOptions?: Partial<ClientOptions>;\n}\n\n/**\n * @hidden\n */\nexport const openAIChatModelOptionsSchema = z.object({\n apiKey: z.string().optional(),\n baseURL: z.string().optional(),\n model: z.string().optional(),\n modelOptions: z\n .object({\n model: z.string().optional(),\n temperature: z.number().optional(),\n topP: z.number().optional(),\n frequencyPenalty: z.number().optional(),\n presencePenalty: z.number().optional(),\n parallelToolCalls: z.boolean().optional().default(true),\n })\n .optional(),\n});\n\n/**\n * Implementation of the ChatModel interface for OpenAI's API\n *\n * This model provides access to OpenAI's capabilities including:\n * - Text generation\n * - Tool use with parallel tool calls\n * - JSON structured output\n * - Image understanding\n *\n * Default model: 'gpt-4o-mini'\n *\n * @example\n * Here's how to create and use an OpenAI chat model:\n * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}\n *\n * @example\n * Here's an example with streaming response:\n * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}\n */\nexport class OpenAIChatModel extends ChatModel {\n constructor(public override options?: 
OpenAIChatModelOptions) {\n super();\n if (options) checkArguments(this.name, openAIChatModelOptionsSchema, options);\n\n const preset = options?.model ? OPENAI_CHAT_MODEL_CAPABILITIES[options.model] : undefined;\n Object.assign(this, preset);\n }\n\n /**\n * @hidden\n */\n protected _client?: OpenAI;\n\n protected apiKeyEnvName = \"OPENAI_API_KEY\";\n protected apiKeyDefault: string | undefined;\n protected supportsNativeStructuredOutputs = true;\n protected supportsToolsUseWithJsonSchema = true;\n protected override supportsParallelToolCalls = true;\n protected supportsToolsEmptyParameters = true;\n protected supportsToolStreaming = true;\n protected supportsTemperature = true;\n\n get client() {\n const { apiKey, url } = this.credential;\n if (!apiKey)\n throw new Error(\n `${this.name} requires an API key. Please provide it via \\`options.apiKey\\`, or set the \\`${this.apiKeyEnvName}\\` environment variable`,\n );\n\n this._client ??= new CustomOpenAI({\n baseURL: url,\n apiKey,\n ...this.options?.clientOptions,\n });\n return this._client;\n }\n\n override get credential() {\n return {\n url: this.options?.baseURL || process.env.OPENAI_BASE_URL,\n apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName] || this.apiKeyDefault,\n model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,\n };\n }\n\n /**\n * Process the input and generate a response\n * @param input The input to process\n * @returns The generated response\n */\n override process(\n input: ChatModelInput,\n options: AgentInvokeOptions,\n ): PromiseOrValue<AgentProcessResult<ChatModelOutput>> {\n return this._process(input, options);\n }\n\n private getReasoningEffort(\n effort: ChatModelInputOptions[\"reasoningEffort\"],\n ): Exclude<ChatModelInputOptions[\"reasoningEffort\"], number> {\n if (typeof effort === \"number\") {\n if (effort > 5000) return \"high\";\n if (effort > 1000) return \"medium\";\n if (effort > 500) return \"low\";\n if (effort > 0) return \"minimal\";\n return undefined;\n }\n\n return effort;\n }\n\n private async _process(\n input: ChatModelInput,\n _options: AgentInvokeOptions,\n ): Promise<AgentResponse<ChatModelOutput>> {\n const { modelOptions = {} } = input;\n\n const messages = await this.getRunMessages(input);\n const model = modelOptions?.model || this.credential.model;\n\n const body: OpenAI.Chat.ChatCompletionCreateParams = {\n model,\n temperature: this.supportsTemperature ? 
modelOptions.temperature : undefined,\n top_p: modelOptions.topP,\n frequency_penalty: modelOptions.frequencyPenalty,\n presence_penalty: modelOptions.presencePenalty,\n messages,\n stream_options: { include_usage: true },\n stream: true,\n reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),\n };\n\n if (model.includes(\"gpt-5\") || model.includes(\"o1-\")) {\n delete body.temperature;\n delete body.top_p;\n }\n\n // For models that do not support tools use with JSON schema in same request,\n // so we need to handle the case where tools are not used and responseFormat is json\n if (!input.tools?.length && input.responseFormat?.type === \"json_schema\") {\n return await this.requestStructuredOutput(body, input.responseFormat);\n }\n\n const { jsonMode, responseFormat } = await this.getRunResponseFormat(input);\n const stream = (await this.client.chat.completions.create({\n ...body,\n tools: toolsFromInputTools(input.tools, {\n addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,\n }),\n tool_choice: input.toolChoice,\n parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),\n response_format: responseFormat,\n })) as unknown as Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;\n\n if (input.responseFormat?.type !== \"json_schema\") {\n return await this.extractResultFromStream(body, stream, false, true);\n }\n\n const result = await this.extractResultFromStream(body, stream, jsonMode);\n // Just return the result if it has tool calls\n if (result.toolCalls?.length || result.json) return result;\n\n // Try to parse the text response as JSON\n // If it matches the json_schema, return it as json\n const json = safeParseJSON(result.text || \"\");\n const validated = this.validateJsonSchema(input.responseFormat.jsonSchema.schema, json, {\n safe: true,\n });\n if (validated.success) {\n return { ...result, json: validated.data, text: undefined };\n }\n logger.warn(\n `${this.name}: Text response does not match JSON schema, trying to use tool to extract json `,\n {\n text: result.text,\n },\n );\n\n const output = await this.requestStructuredOutput(body, input.responseFormat);\n return { ...output, usage: mergeUsage(result.usage, output.usage) };\n }\n\n private getParallelToolCalls(\n input: ChatModelInput,\n modelOptions: ChatModelInputOptions,\n ): boolean | undefined {\n if (!this.supportsParallelToolCalls) return undefined;\n if (!input.tools?.length) return undefined;\n return modelOptions.parallelToolCalls;\n }\n\n protected async getRunMessages(input: ChatModelInput): Promise<ChatCompletionMessageParam[]> {\n const messages = await contentsFromInputMessages(input.messages);\n\n if (input.responseFormat?.type === \"json_schema\") {\n if (\n !this.supportsNativeStructuredOutputs ||\n (!this.supportsToolsUseWithJsonSchema && input.tools?.length)\n ) {\n messages.unshift({\n role: \"system\",\n content: getJsonOutputPrompt(input.responseFormat.jsonSchema.schema),\n });\n }\n }\n return messages;\n }\n\n private async getRunResponseFormat(input: Partial<ChatModelInput>): Promise<{\n jsonMode: boolean;\n responseFormat: ResponseFormatJSONSchema | { type: \"json_object\" } | undefined;\n }> {\n if (!this.supportsToolsUseWithJsonSchema && input.tools?.length)\n return { jsonMode: false, responseFormat: undefined };\n\n if (!this.supportsNativeStructuredOutputs) {\n const jsonMode = input.responseFormat?.type === \"json_schema\";\n return {\n jsonMode,\n responseFormat: jsonMode ? 
{ type: \"json_object\" } : undefined,\n };\n }\n\n if (input.responseFormat?.type === \"json_schema\") {\n return {\n jsonMode: true,\n responseFormat: {\n type: \"json_schema\",\n json_schema: {\n ...input.responseFormat.jsonSchema,\n schema: this.jsonSchemaToOpenAIJsonSchema(input.responseFormat.jsonSchema.schema),\n },\n },\n };\n }\n\n return { jsonMode: false, responseFormat: undefined };\n }\n\n private async requestStructuredOutput(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n responseFormat: ChatModelInput[\"responseFormat\"],\n ): Promise<ChatModelOutput> {\n if (responseFormat?.type !== \"json_schema\") {\n throw new Error(\"Expected json_schema response format\");\n }\n\n const { jsonMode, responseFormat: resolvedResponseFormat } = await this.getRunResponseFormat({\n responseFormat,\n });\n const res = (await this.client.chat.completions.create({\n ...body,\n response_format: resolvedResponseFormat,\n })) as unknown as Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;\n\n return this.extractResultFromStream(body, res, jsonMode);\n }\n\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode: boolean | undefined,\n streaming: true,\n ): Promise<ReadableStream<AgentResponseChunk<ChatModelOutput>>>;\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode?: boolean,\n streaming?: false,\n ): Promise<ChatModelOutput>;\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode?: boolean,\n streaming?: boolean,\n ): Promise<ReadableStream<AgentResponseChunk<ChatModelOutput>> | ChatModelOutput> {\n const result = new ReadableStream<AgentResponseChunk<ChatModelOutput>>({\n start: async (controller) => {\n try {\n controller.enqueue({\n delta: {\n json: {\n modelOptions: {\n reasoningEffort: body.reasoning_effort,\n },\n },\n },\n });\n\n let text = \"\";\n let refusal = \"\";\n const toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[] = [];\n let model: string | undefined;\n\n for await (const chunk of stream) {\n const choice = chunk.choices?.[0];\n const delta = choice?.delta;\n\n if (!model) {\n model = chunk.model;\n controller.enqueue({\n delta: {\n json: {\n model,\n },\n },\n });\n }\n\n if (delta?.tool_calls?.length) {\n for (const call of delta.tool_calls) {\n if (this.supportsToolStreaming && call.index !== undefined) {\n handleToolCallDelta(toolCalls, call);\n } else {\n handleCompleteToolCall(toolCalls, call);\n }\n }\n }\n\n if (delta && \"reasoning\" in delta && typeof delta.reasoning === \"string\") {\n controller.enqueue({ delta: { text: { thoughts: delta.reasoning } } });\n }\n\n if (delta?.content) {\n text += delta.content;\n if (!jsonMode) {\n controller.enqueue({\n delta: {\n text: {\n text: delta.content,\n },\n },\n });\n }\n }\n\n if (delta?.refusal) {\n refusal += delta.refusal;\n }\n\n if (chunk.usage) {\n const usage: ChatModelOutputUsage = {\n inputTokens: chunk.usage.prompt_tokens,\n outputTokens: chunk.usage.completion_tokens,\n };\n\n // Parse cache statistics if available\n const inputDetails = chunk.usage.prompt_tokens_details;\n if (inputDetails?.cached_tokens) {\n usage.cacheReadInputTokens = inputDetails.cached_tokens;\n }\n\n controller.enqueue({\n delta: 
{\n json: {\n usage,\n },\n },\n });\n }\n }\n\n if (jsonMode && text) {\n controller.enqueue({\n delta: {\n json: {\n json: safeParseJSON(text),\n },\n },\n });\n }\n\n if (toolCalls.length) {\n controller.enqueue({\n delta: {\n json: {\n toolCalls: toolCalls.map(({ args, ...c }) => ({\n ...c,\n function: { ...c.function, arguments: args ? safeParseJSON(args) : {} },\n })),\n },\n },\n });\n }\n\n if (refusal) {\n controller.error(new Error(`Got refusal from LLM: ${refusal}`));\n }\n\n controller.close();\n } catch (error) {\n controller.error(error);\n }\n },\n });\n\n return streaming ? result : await agentResponseStreamToObject(result);\n }\n\n /**\n * Controls how optional fields are handled in JSON schema conversion\n * - \"anyOf\": All fields are required but can be null (default)\n * - \"optional\": Fields marked as optional in schema remain optional\n */\n protected optionalFieldMode?: \"anyOf\" | \"optional\" = \"anyOf\";\n\n protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown> {\n if (schema?.type === \"object\") {\n const s = schema as {\n required?: string[];\n properties: Record<string, unknown>;\n };\n\n const required = this.optionalFieldMode === \"anyOf\" ? Object.keys(s.properties) : s.required;\n\n return {\n ...schema,\n properties: Object.fromEntries(\n Object.entries(s.properties).map(([key, value]) => {\n const valueSchema = this.jsonSchemaToOpenAIJsonSchema(value as Record<string, unknown>);\n\n // NOTE: All fields must be required https://platform.openai.com/docs/guides/structured-outputs/all-fields-must-be-required\n return [\n key,\n this.optionalFieldMode === \"optional\" || s.required?.includes(key)\n ? valueSchema\n : { anyOf: [valueSchema, { type: [\"null\"] }] },\n ];\n }),\n ),\n required,\n };\n }\n\n if (schema?.type === \"array\") {\n const { items } = schema as { items: Record<string, unknown> };\n\n return {\n ...schema,\n items: this.jsonSchemaToOpenAIJsonSchema(items),\n };\n }\n\n return schema;\n }\n}\n\n// Create role mapper for OpenAI (uses standard mapping)\nconst mapRole = createRoleMapper(STANDARD_ROLE_MAP);\n\n/**\n * @hidden\n */\nexport async function contentsFromInputMessages(\n messages: ChatModelInputMessage[],\n): Promise<ChatCompletionMessageParam[]> {\n return Promise.all(\n messages.map(\n async (i) =>\n ({\n role: mapRole(i.role),\n content:\n typeof i.content === \"string\"\n ? i.content\n : i.content &&\n (\n await Promise.all(\n i.content.map<Promise<ChatCompletionContentPart>>(async (c) => {\n switch (c.type) {\n case \"text\":\n return { type: \"text\", text: c.text };\n case \"url\":\n return { type: \"image_url\", image_url: { url: c.url } };\n case \"file\":\n return {\n type: \"image_url\",\n image_url: {\n url: `data:${c.mimeType || \"image/png\"};base64,${c.data}`,\n },\n };\n case \"local\": {\n throw new Error(\n `Unsupported local file: ${c.path}, it should be converted to base64 at ChatModel`,\n );\n }\n }\n }),\n )\n ).filter(isNonNullable),\n tool_calls: i.toolCalls?.map((i) => ({\n ...i,\n function: {\n ...i.function,\n arguments: JSON.stringify(i.function.arguments),\n },\n })),\n tool_call_id: i.toolCallId,\n name: i.name,\n }) as ChatCompletionMessageParam,\n ),\n );\n}\n\n/**\n * @hidden\n */\nexport function toolsFromInputTools(\n tools?: ChatModelInputTool[],\n options?: { addTypeToEmptyParameters?: boolean },\n): ChatCompletionTool[] | undefined {\n return tools?.length\n ? 
tools.map((i) => {\n const parameters = i.function.parameters as Record<string, unknown>;\n if (options?.addTypeToEmptyParameters && Object.keys(parameters).length === 0) {\n parameters.type = \"object\";\n }\n return {\n type: \"function\",\n function: {\n name: i.function.name,\n description: i.function.description,\n parameters,\n },\n };\n })\n : undefined;\n}\n\nfunction handleToolCallDelta(\n toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[],\n call: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall & {\n index: number;\n },\n) {\n toolCalls[call.index] ??= {\n id: call.id || v7(),\n type: \"function\" as const,\n function: { name: \"\", arguments: {} },\n args: \"\",\n };\n const c = toolCalls[call.index];\n if (!c) throw new Error(\"Tool call not found\");\n\n if (call.type) c.type = call.type;\n\n c.function.name = c.function.name + (call.function?.name || \"\");\n c.args = c.args.concat(call.function?.arguments || \"\");\n}\n\nfunction handleCompleteToolCall(\n toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[],\n call: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall,\n) {\n toolCalls.push({\n id: call.id || v7(),\n type: \"function\" as const,\n function: {\n name: call.function?.name || \"\",\n arguments: safeParseJSON(call.function?.arguments || \"{}\"),\n },\n args: call.function?.arguments || \"\",\n });\n}\n\n// safeParseJSON is now imported from @aigne/core\n"],"mappings":";;;;;;;;;;;AAkCA,MAAM,kCAAkC;AAYxC,MAAM,iCAAuF;CAC3F,WAAW;EAAE,2BAA2B;EAAO,qBAAqB;EAAO;CAC3E,WAAW;EAAE,2BAA2B;EAAO,qBAAqB;EAAO;CAC5E;;;;AA6BD,MAAa,+BAA+B,EAAE,OAAO;CACnD,QAAQ,EAAE,QAAQ,CAAC,UAAU;CAC7B,SAAS,EAAE,QAAQ,CAAC,UAAU;CAC9B,OAAO,EAAE,QAAQ,CAAC,UAAU;CAC5B,cAAc,EACX,OAAO;EACN,OAAO,EAAE,QAAQ,CAAC,UAAU;EAC5B,aAAa,EAAE,QAAQ,CAAC,UAAU;EAClC,MAAM,EAAE,QAAQ,CAAC,UAAU;EAC3B,kBAAkB,EAAE,QAAQ,CAAC,UAAU;EACvC,iBAAiB,EAAE,QAAQ,CAAC,UAAU;EACtC,mBAAmB,EAAE,SAAS,CAAC,UAAU,CAAC,QAAQ,KAAK;EACxD,CAAC,CACD,UAAU;CACd,CAAC;;;;;;;;;;;;;;;;;;;;AAqBF,IAAa,kBAAb,cAAqC,UAAU;CAC7C,YAAY,AAAgB,SAAkC;AAC5D,SAAO;EADmB;AAE1B,MAAI,QAAS,gBAAe,KAAK,MAAM,8BAA8B,QAAQ;EAE7E,MAAM,SAAS,SAAS,QAAQ,+BAA+B,QAAQ,SAAS;AAChF,SAAO,OAAO,MAAM,OAAO;;;;;CAM7B,AAAU;CAEV,AAAU,gBAAgB;CAC1B,AAAU;CACV,AAAU,kCAAkC;CAC5C,AAAU,iCAAiC;CAC3C,AAAmB,4BAA4B;CAC/C,AAAU,+BAA+B;CACzC,AAAU,wBAAwB;CAClC,AAAU,sBAAsB;CAEhC,IAAI,SAAS;EACX,MAAM,EAAE,QAAQ,QAAQ,KAAK;AAC7B,MAAI,CAAC,OACH,OAAM,IAAI,MACR,GAAG,KAAK,KAAK,+EAA+E,KAAK,cAAc,yBAChH;AAEH,OAAK,YAAY,IAAI,aAAa;GAChC,SAAS;GACT;GACA,GAAG,KAAK,SAAS;GAClB,CAAC;AACF,SAAO,KAAK;;CAGd,IAAa,aAAa;AACxB,SAAO;GACL,KAAK,KAAK,SAAS,WAAW,QAAQ,IAAI;GAC1C,QAAQ,KAAK,SAAS,UAAU,QAAQ,IAAI,KAAK,kBAAkB,KAAK;GACxE,OAAO,KAAK,SAAS,SAAS;GAC/B;;;;;;;CAQH,AAAS,QACP,OACA,SACqD;AACrD,SAAO,KAAK,SAAS,OAAO,QAAQ;;CAGtC,AAAQ,mBACN,QAC2D;AAC3D,MAAI,OAAO,WAAW,UAAU;AAC9B,OAAI,SAAS,IAAM,QAAO;AAC1B,OAAI,SAAS,IAAM,QAAO;AAC1B,OAAI,SAAS,IAAK,QAAO;AACzB,OAAI,SAAS,EAAG,QAAO;AACvB;;AAGF,SAAO;;CAGT,MAAc,SACZ,OACA,UACyC;EACzC,MAAM,EAAE,eAAe,EAAE,KAAK;EAE9B,MAAM,WAAW,MAAM,KAAK,eAAe,MAAM;EACjD,MAAM,QAAQ,cAAc,SAAS,KAAK,WAAW;EAErD,MAAM,OAA+C;GACnD;GACA,aAAa,KAAK,sBAAsB,aAAa,cAAc;GACnE,OAAO,aAAa;GACpB,mBAAmB,aAAa;GAChC,kBAAkB,aAAa;GAC/B;GACA,gBAAgB,EAAE,eAAe,MAAM;GACvC,QAAQ;GACR,kBAAkB,KAAK,mBAAmB,aAAa,gBAAgB;GACxE;AAED,MAAI,MAAM,SAAS,QAAQ,IAAI,MAAM,SAAS,MAAM,EAAE;AACpD,UAAO,KAAK;AACZ,UAAO,KAAK;;AAKd,MAAI,CAAC,MAAM,OAAO,UAAU,MAAM,gBAAgB,SAAS,cACzD,QAAO,MAAM,KAAK,wBAAwB,MAAM,MAAM,eAAe;EAGvE,MAAM,EAAE,UAAU,mBAAmB,MAAM,KAAK,qBAAqB,MAAM;EAC3E,MAAM,SAAU,MAAM,KAAK,O
AAO,KAAK,YAAY,OAAO;GACxD,GAAG;GACH,OAAO,oBAAoB,MAAM,OAAO,EACtC,0BAA0B,CAAC,KAAK,8BACjC,CAAC;GACF,aAAa,MAAM;GACnB,qBAAqB,KAAK,qBAAqB,OAAO,aAAa;GACnE,iBAAiB;GAClB,CAAC;AAEF,MAAI,MAAM,gBAAgB,SAAS,cACjC,QAAO,MAAM,KAAK,wBAAwB,MAAM,QAAQ,OAAO,KAAK;EAGtE,MAAM,SAAS,MAAM,KAAK,wBAAwB,MAAM,QAAQ,SAAS;AAEzE,MAAI,OAAO,WAAW,UAAU,OAAO,KAAM,QAAO;EAIpD,MAAM,OAAO,cAAc,OAAO,QAAQ,GAAG;EAC7C,MAAM,YAAY,KAAK,mBAAmB,MAAM,eAAe,WAAW,QAAQ,MAAM,EACtF,MAAM,MACP,CAAC;AACF,MAAI,UAAU,QACZ,QAAO;GAAE,GAAG;GAAQ,MAAM,UAAU;GAAM,MAAM;GAAW;AAE7D,SAAO,KACL,GAAG,KAAK,KAAK,kFACb,EACE,MAAM,OAAO,MACd,CACF;EAED,MAAM,SAAS,MAAM,KAAK,wBAAwB,MAAM,MAAM,eAAe;AAC7E,SAAO;GAAE,GAAG;GAAQ,OAAO,WAAW,OAAO,OAAO,OAAO,MAAM;GAAE;;CAGrE,AAAQ,qBACN,OACA,cACqB;AACrB,MAAI,CAAC,KAAK,0BAA2B,QAAO;AAC5C,MAAI,CAAC,MAAM,OAAO,OAAQ,QAAO;AACjC,SAAO,aAAa;;CAGtB,MAAgB,eAAe,OAA8D;EAC3F,MAAM,WAAW,MAAM,0BAA0B,MAAM,SAAS;AAEhE,MAAI,MAAM,gBAAgB,SAAS,eACjC;OACE,CAAC,KAAK,mCACL,CAAC,KAAK,kCAAkC,MAAM,OAAO,OAEtD,UAAS,QAAQ;IACf,MAAM;IACN,SAAS,oBAAoB,MAAM,eAAe,WAAW,OAAO;IACrE,CAAC;;AAGN,SAAO;;CAGT,MAAc,qBAAqB,OAGhC;AACD,MAAI,CAAC,KAAK,kCAAkC,MAAM,OAAO,OACvD,QAAO;GAAE,UAAU;GAAO,gBAAgB;GAAW;AAEvD,MAAI,CAAC,KAAK,iCAAiC;GACzC,MAAM,WAAW,MAAM,gBAAgB,SAAS;AAChD,UAAO;IACL;IACA,gBAAgB,WAAW,EAAE,MAAM,eAAe,GAAG;IACtD;;AAGH,MAAI,MAAM,gBAAgB,SAAS,cACjC,QAAO;GACL,UAAU;GACV,gBAAgB;IACd,MAAM;IACN,aAAa;KACX,GAAG,MAAM,eAAe;KACxB,QAAQ,KAAK,6BAA6B,MAAM,eAAe,WAAW,OAAO;KAClF;IACF;GACF;AAGH,SAAO;GAAE,UAAU;GAAO,gBAAgB;GAAW;;CAGvD,MAAc,wBACZ,MACA,gBAC0B;AAC1B,MAAI,gBAAgB,SAAS,cAC3B,OAAM,IAAI,MAAM,uCAAuC;EAGzD,MAAM,EAAE,UAAU,gBAAgB,2BAA2B,MAAM,KAAK,qBAAqB,EAC3F,gBACD,CAAC;EACF,MAAM,MAAO,MAAM,KAAK,OAAO,KAAK,YAAY,OAAO;GACrD,GAAG;GACH,iBAAiB;GAClB,CAAC;AAEF,SAAO,KAAK,wBAAwB,MAAM,KAAK,SAAS;;CAe1D,MAAc,wBACZ,MACA,QACA,UACA,WACgF;EAChF,MAAM,SAAS,IAAI,eAAoD,EACrE,OAAO,OAAO,eAAe;AAC3B,OAAI;AACF,eAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,cAAc,EACZ,iBAAiB,KAAK,kBACvB,EACF,EACF,EACF,CAAC;IAEF,IAAI,OAAO;IACX,IAAI,UAAU;IACd,MAAM,YAEC,EAAE;IACT,IAAI;AAEJ,eAAW,MAAM,SAAS,QAAQ;KAEhC,MAAM,SADS,MAAM,UAAU,KACT;AAEtB,SAAI,CAAC,OAAO;AACV,cAAQ,MAAM;AACd,iBAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,OACD,EACF,EACF,CAAC;;AAGJ,SAAI,OAAO,YAAY,OACrB,MAAK,MAAM,QAAQ,MAAM,WACvB,KAAI,KAAK,yBAAyB,KAAK,UAAU,OAC/C,qBAAoB,WAAW,KAAK;SAEpC,wBAAuB,WAAW,KAAK;AAK7C,SAAI,SAAS,eAAe,SAAS,OAAO,MAAM,cAAc,SAC9D,YAAW,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE,UAAU,MAAM,WAAW,EAAE,EAAE,CAAC;AAGxE,SAAI,OAAO,SAAS;AAClB,cAAQ,MAAM;AACd,UAAI,CAAC,SACH,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,MAAM,MAAM,SACb,EACF,EACF,CAAC;;AAIN,SAAI,OAAO,QACT,YAAW,MAAM;AAGnB,SAAI,MAAM,OAAO;MACf,MAAM,QAA8B;OAClC,aAAa,MAAM,MAAM;OACzB,cAAc,MAAM,MAAM;OAC3B;MAGD,MAAM,eAAe,MAAM,MAAM;AACjC,UAAI,cAAc,cAChB,OAAM,uBAAuB,aAAa;AAG5C,iBAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,OACD,EACF,EACF,CAAC;;;AAIN,QAAI,YAAY,KACd,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,MAAM,cAAc,KAAK,EAC1B,EACF,EACF,CAAC;AAGJ,QAAI,UAAU,OACZ,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,WAAW,UAAU,KAAK,EAAE,MAAM,GAAG,SAAS;KAC5C,GAAG;KACH,UAAU;MAAE,GAAG,EAAE;MAAU,WAAW,OAAO,cAAc,KAAK,GAAG,EAAE;MAAE;KACxE,EAAE,EACJ,EACF,EACF,CAAC;AAGJ,QAAI,QACF,YAAW,sBAAM,IAAI,MAAM,yBAAyB,UAAU,CAAC;AAGjE,eAAW,OAAO;YACX,OAAO;AACd,eAAW,MAAM,MAAM;;KAG5B,CAAC;AAEF,SAAO,YAAY,SAAS,MAAM,4BAA4B,OAAO;;;;;;;CAQvE,AAAU,oBAA2C;CAErD,AAAU,6BAA6B,QAA0D;AAC/F,MAAI,QAAQ,SAAS,UAAU;GAC7B,MAAM,IAAI;GAKV,MAAM,WAAW,KAAK,sBAAsB,UAAU,OAAO,KAAK,EAAE,WAAW,GAAG,EAAE;AAEpF,UAAO;IACL,GAAG;IACH,YAAY,OAAO,YACjB,OAAO,QAAQ,EAAE,WAAW,CAAC,KAAK,CAAC,KAAK,WAAW;KACjD,MAAM,cAAc,KAAK,6BAA6B,MAAiC;AAGvF,YAAO,CACL,KACA,KAAK,sBAAsB,cAAc,EAAE,UAAU,SAAS,IAAI,GAC9D,cACA,EAAE,OAAO,CAAC,aAAa,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CACjD;MACD,C
ACH;IACD;IACD;;AAGH,MAAI,QAAQ,SAAS,SAAS;GAC5B,MAAM,EAAE,UAAU;AAElB,UAAO;IACL,GAAG;IACH,OAAO,KAAK,6BAA6B,MAAM;IAChD;;AAGH,SAAO;;;AAKX,MAAM,UAAU,iBAAiB,kBAAkB;;;;AAKnD,eAAsB,0BACpB,UACuC;AACvC,QAAO,QAAQ,IACb,SAAS,IACP,OAAO,OACJ;EACC,MAAM,QAAQ,EAAE,KAAK;EACrB,SACE,OAAO,EAAE,YAAY,WACjB,EAAE,UACF,EAAE,YAEA,MAAM,QAAQ,IACZ,EAAE,QAAQ,IAAwC,OAAO,MAAM;AAC7D,WAAQ,EAAE,MAAV;IACE,KAAK,OACH,QAAO;KAAE,MAAM;KAAQ,MAAM,EAAE;KAAM;IACvC,KAAK,MACH,QAAO;KAAE,MAAM;KAAa,WAAW,EAAE,KAAK,EAAE,KAAK;KAAE;IACzD,KAAK,OACH,QAAO;KACL,MAAM;KACN,WAAW,EACT,KAAK,QAAQ,EAAE,YAAY,YAAY,UAAU,EAAE,QACpD;KACF;IACH,KAAK,QACH,OAAM,IAAI,MACR,2BAA2B,EAAE,KAAK,iDACnC;;IAGL,CACH,EACD,OAAO,cAAc;EAC7B,YAAY,EAAE,WAAW,KAAK,SAAO;GACnC,GAAGA;GACH,UAAU;IACR,GAAGA,IAAE;IACL,WAAW,KAAK,UAAUA,IAAE,SAAS,UAAU;IAChD;GACF,EAAE;EACH,cAAc,EAAE;EAChB,MAAM,EAAE;EACT,EACJ,CACF;;;;;AAMH,SAAgB,oBACd,OACA,SACkC;AAClC,QAAO,OAAO,SACV,MAAM,KAAK,MAAM;EACf,MAAM,aAAa,EAAE,SAAS;AAC9B,MAAI,SAAS,4BAA4B,OAAO,KAAK,WAAW,CAAC,WAAW,EAC1E,YAAW,OAAO;AAEpB,SAAO;GACL,MAAM;GACN,UAAU;IACR,MAAM,EAAE,SAAS;IACjB,aAAa,EAAE,SAAS;IACxB;IACD;GACF;GACD,GACF;;AAGN,SAAS,oBACP,WAGA,MAGA;AACA,WAAU,KAAK,WAAW;EACxB,IAAI,KAAK,MAAM,IAAI;EACnB,MAAM;EACN,UAAU;GAAE,MAAM;GAAI,WAAW,EAAE;GAAE;EACrC,MAAM;EACP;CACD,MAAM,IAAI,UAAU,KAAK;AACzB,KAAI,CAAC,EAAG,OAAM,IAAI,MAAM,sBAAsB;AAE9C,KAAI,KAAK,KAAM,GAAE,OAAO,KAAK;AAE7B,GAAE,SAAS,OAAO,EAAE,SAAS,QAAQ,KAAK,UAAU,QAAQ;AAC5D,GAAE,OAAO,EAAE,KAAK,OAAO,KAAK,UAAU,aAAa,GAAG;;AAGxD,SAAS,uBACP,WAGA,MACA;AACA,WAAU,KAAK;EACb,IAAI,KAAK,MAAM,IAAI;EACnB,MAAM;EACN,UAAU;GACR,MAAM,KAAK,UAAU,QAAQ;GAC7B,WAAW,cAAc,KAAK,UAAU,aAAa,KAAK;GAC3D;EACD,MAAM,KAAK,UAAU,aAAa;EACnC,CAAC"}
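
One more detail from openai-chat-model.mjs worth restating: getReasoningEffort accepts either OpenAI's string levels or a numeric effort value and maps the number onto a level by threshold. A standalone restatement of that mapping; the type alias is an assumption added for illustration:

  type ReasoningEffortLevel = "minimal" | "low" | "medium" | "high";

  function mapReasoningEffort(
    effort: number | ReasoningEffortLevel | undefined,
  ): ReasoningEffortLevel | undefined {
    if (typeof effort === "number") {
      if (effort > 5000) return "high";
      if (effort > 1000) return "medium";
      if (effort > 500) return "low";
      if (effort > 0) return "minimal";
      return undefined; // zero or negative values drop the hint entirely
    }
    return effort; // string levels pass through unchanged
  }

  // mapReasoningEffort(8000)     -> "high"
  // mapReasoningEffort(750)      -> "low"
  // mapReasoningEffort("medium") -> "medium"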