@aigne/openai 1.74.0-beta.2 → 1.74.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +4 -4
- package/dist/openai-chat-model.cjs +18 -18
- package/dist/openai-chat-model.d.cts +23 -23
- package/dist/openai-chat-model.d.cts.map +1 -1
- package/dist/openai-chat-model.d.mts +23 -23
- package/dist/openai-chat-model.d.mts.map +1 -1
- package/dist/openai-chat-model.mjs +7 -7
- package/dist/openai-chat-model.mjs.map +1 -1
- package/dist/openai-image-model.cjs +7 -7
- package/dist/openai-image-model.d.cts +23 -23
- package/dist/openai-image-model.d.cts.map +1 -1
- package/dist/openai-image-model.d.mts +23 -23
- package/dist/openai-image-model.d.mts.map +1 -1
- package/dist/openai-image-model.mjs +3 -3
- package/dist/openai-image-model.mjs.map +1 -1
- package/dist/openai-video-model.cjs +9 -9
- package/dist/openai-video-model.d.cts +42 -42
- package/dist/openai-video-model.d.cts.map +1 -1
- package/dist/openai-video-model.d.mts +42 -42
- package/dist/openai-video-model.d.mts.map +1 -1
- package/dist/openai-video-model.mjs +3 -3
- package/dist/openai-video-model.mjs.map +1 -1
- package/package.json +3 -6
- package/LICENSE.md +0 -93
package/README.md
CHANGED
@@ -42,19 +42,19 @@ AIGNE OpenAI SDK for integrating with OpenAI's GPT models and API services withi
 ### Using npm

 ```bash
-npm install @aigne/openai @aigne/
+npm install @aigne/openai @aigne/model-base
 ```

 ### Using yarn

 ```bash
-yarn add @aigne/openai @aigne/
+yarn add @aigne/openai @aigne/model-base
 ```

 ### Using pnpm

 ```bash
-pnpm add @aigne/openai @aigne/
+pnpm add @aigne/openai @aigne/model-base
 ```

 ## Chat Completions
@@ -124,7 +124,7 @@ console.log(result);
 ## Streaming Responses

 ```typescript file="test/openai-chat-model.test.ts" region="example-openai-chat-model-stream"
-import { isAgentResponseDelta } from "@aigne/
+import { isAgentResponseDelta } from "@aigne/model-base";
 import { OpenAIChatModel } from "@aigne/openai";

 const model = new OpenAIChatModel({
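The README's streaming example now imports `isAgentResponseDelta` from `@aigne/model-base`. Below is a minimal sketch of what a streaming call could look like under the new import path; the `invoke()` signature, the `streaming` option, and the chunk shape are assumptions inferred from the type names and the chunks enqueued elsewhere in this diff, not the package's documented API.

```typescript
import { isAgentResponseDelta } from "@aigne/model-base";
import { OpenAIChatModel } from "@aigne/openai";

// Options mirror the OpenAIChatModelOptions declared in this diff:
// apiKey falls back to the OPENAI_API_KEY environment variable if omitted.
const model = new OpenAIChatModel({
  apiKey: process.env.OPENAI_API_KEY,
  model: "gpt-4o-mini", // default model named in the bundled source
});

async function main() {
  // Hypothetical call: the exact invoke() options are an assumption.
  const stream = await model.invoke(
    { messages: [{ role: "user", content: "Hello!" }] },
    { streaming: true },
  );

  let text = "";
  for await (const chunk of stream) {
    // isAgentResponseDelta is assumed to narrow a chunk to one carrying a delta;
    // the delta.text.text shape matches the chunks enqueued in openai-chat-model.cjs.
    if (isAgentResponseDelta(chunk)) text += chunk.delta.text?.text ?? "";
  }
  console.log(text);
}

main();
```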
package/dist/openai-chat-model.cjs
CHANGED
@@ -1,11 +1,11 @@
 const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
 const require_openai = require('./openai.cjs');
-let
-let
-let
-let
-let
-let
+let _aigne_model_base = require("@aigne/model-base");
+let _aigne_model_base_utils_logger = require("@aigne/model-base/utils/logger");
+let _aigne_model_base_utils_model_utils = require("@aigne/model-base/utils/model-utils");
+let _aigne_model_base_utils_prompts = require("@aigne/model-base/utils/prompts");
+let _aigne_model_base_utils_stream_utils = require("@aigne/model-base/utils/stream-utils");
+let _aigne_model_base_utils_type_utils = require("@aigne/model-base/utils/type-utils");
 let _aigne_uuid = require("@aigne/uuid");
 let zod = require("zod");

@@ -56,11 +56,11 @@ const openAIChatModelOptionsSchema = zod.z.object({
 * Here's an example with streaming response:
 * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}
 */
-var OpenAIChatModel = class extends
+var OpenAIChatModel = class extends _aigne_model_base.ChatModel {
 constructor(options) {
 super();
 this.options = options;
-if (options) (0,
+if (options) (0, _aigne_model_base_utils_type_utils.checkArguments)(this.name, openAIChatModelOptionsSchema, options);
 const preset = options?.model ? OPENAI_CHAT_MODEL_CAPABILITIES[options.model] : void 0;
 Object.assign(this, preset);
 }
@@ -142,18 +142,18 @@ var OpenAIChatModel = class extends _aigne_core.ChatModel {
 if (input.responseFormat?.type !== "json_schema") return await this.extractResultFromStream(body, stream, false, true);
 const result = await this.extractResultFromStream(body, stream, jsonMode);
 if (result.toolCalls?.length || result.json) return result;
-const json = (0,
+const json = (0, _aigne_model_base.safeParseJSON)(result.text || "");
 const validated = this.validateJsonSchema(input.responseFormat.jsonSchema.schema, json, { safe: true });
 if (validated.success) return {
 ...result,
 json: validated.data,
 text: void 0
 };
-
+_aigne_model_base_utils_logger.logger.warn(`${this.name}: Text response does not match JSON schema, trying to use tool to extract json `, { text: result.text });
 const output = await this.requestStructuredOutput(body, input.responseFormat);
 return {
 ...output,
-usage: (0,
+usage: (0, _aigne_model_base_utils_model_utils.mergeUsage)(result.usage, output.usage)
 };
 }
 getParallelToolCalls(input, modelOptions) {
@@ -166,7 +166,7 @@ var OpenAIChatModel = class extends _aigne_core.ChatModel {
 if (input.responseFormat?.type === "json_schema") {
 if (!this.supportsNativeStructuredOutputs || !this.supportsToolsUseWithJsonSchema && input.tools?.length) messages.unshift({
 role: "system",
-content: (0,
+content: (0, _aigne_model_base_utils_prompts.getJsonOutputPrompt)(input.responseFormat.jsonSchema.schema)
 });
 }
 return messages;
@@ -239,12 +239,12 @@ var OpenAIChatModel = class extends _aigne_core.ChatModel {
 controller.enqueue({ delta: { json: { usage } } });
 }
 }
-if (jsonMode && text) controller.enqueue({ delta: { json: { json: (0,
+if (jsonMode && text) controller.enqueue({ delta: { json: { json: (0, _aigne_model_base.safeParseJSON)(text) } } });
 if (toolCalls.length) controller.enqueue({ delta: { json: { toolCalls: toolCalls.map(({ args, ...c }) => ({
 ...c,
 function: {
 ...c.function,
-arguments: args ? (0,
+arguments: args ? (0, _aigne_model_base.safeParseJSON)(args) : {}
 }
 })) } } });
 if (refusal) controller.error(/* @__PURE__ */ new Error(`Got refusal from LLM: ${refusal}`));
@@ -253,7 +253,7 @@ var OpenAIChatModel = class extends _aigne_core.ChatModel {
 controller.error(error);
 }
 } });
-return streaming ? result : await (0,
+return streaming ? result : await (0, _aigne_model_base_utils_stream_utils.modelResponseStreamToObject)(result);
 }
 /**
 * Controls how optional fields are handled in JSON schema conversion
@@ -284,7 +284,7 @@ var OpenAIChatModel = class extends _aigne_core.ChatModel {
 return schema;
 }
 };
-const mapRole = (0,
+const mapRole = (0, _aigne_model_base.createRoleMapper)(_aigne_model_base.STANDARD_ROLE_MAP);
 /**
 * @hidden
 */
@@ -307,7 +307,7 @@ async function contentsFromInputMessages(messages) {
 };
 case "local": throw new Error(`Unsupported local file: ${c.path}, it should be converted to base64 at ChatModel`);
 }
-}))).filter(
+}))).filter(_aigne_model_base_utils_type_utils.isNonNullable),
 tool_calls: i.toolCalls?.map((i$1) => ({
 ...i$1,
 function: {
@@ -358,7 +358,7 @@ function handleCompleteToolCall(toolCalls, call) {
 type: "function",
 function: {
 name: call.function?.name || "",
-arguments: (0,
+arguments: (0, _aigne_model_base.safeParseJSON)(call.function?.arguments || "{}")
 },
 args: call.function?.arguments || ""
 });
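Several of the call sites above now route JSON parsing through `_aigne_model_base.safeParseJSON` (streamed tool-call arguments, JSON-mode text, and the structured-output fallback). The helper's implementation is not shown in this diff; the sketch below only illustrates the kind of non-throwing parse these call sites rely on, and the fallback value is a guess rather than the library's contract.

```typescript
// Illustrative sketch only: a non-throwing JSON parse in the spirit of the
// safeParseJSON imported from @aigne/model-base. The real helper's fallback
// behaviour is not shown in this diff.
function safeParseJSONSketch(text: string): unknown {
  try {
    return JSON.parse(text);
  } catch {
    // Assumption: parse failures are swallowed instead of thrown, so a
    // partially streamed or malformed payload never aborts the response.
    return undefined;
  }
}

// Streamed tool-call arguments arrive as a raw string and are parsed once complete:
const args = safeParseJSONSketch('{"city":"Paris","unit":"celsius"}');
console.log(args);
```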
package/dist/openai-chat-model.d.cts
CHANGED
@@ -1,5 +1,5 @@
-import {
-import { PromiseOrValue } from "@aigne/
+import { ChatModel, ChatModelInput, ChatModelInputMessage, ChatModelInputTool, ChatModelOptions, ChatModelOutput, ModelInvokeOptions, ModelProcessResult } from "@aigne/model-base";
+import { PromiseOrValue } from "@aigne/model-base/utils/type-utils";
 import { ClientOptions, OpenAI as OpenAI$1 } from "openai";
 import { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
 import { z } from "zod";
@@ -19,20 +19,20 @@ interface OpenAIChatModelCapabilities {
 */
 interface OpenAIChatModelOptions extends ChatModelOptions {
 /**
-
-
-
-
+* API key for OpenAI API
+*
+* If not provided, will look for OPENAI_API_KEY in environment variables
+*/
 apiKey?: string;
 /**
-
-
-
-
+* Base URL for OpenAI API
+*
+* Useful for proxies or alternate endpoints
+*/
 baseURL?: string;
 /**
-
-
+* Client options for OpenAI API
+*/
 clientOptions?: Partial<ClientOptions>;
 }
 /**
@@ -112,8 +112,8 @@ declare class OpenAIChatModel extends ChatModel {
 options?: OpenAIChatModelOptions | undefined;
 constructor(options?: OpenAIChatModelOptions | undefined);
 /**
-
-
+* @hidden
+*/
 protected _client?: OpenAI$1;
 protected apiKeyEnvName: string;
 protected apiKeyDefault: string | undefined;
@@ -130,11 +130,11 @@ declare class OpenAIChatModel extends ChatModel {
 model: string;
 };
 /**
-
-
-
-
-process(input: ChatModelInput, options:
+* Process the input and generate a response
+* @param input The input to process
+* @returns The generated response
+*/
+process(input: ChatModelInput, options: ModelInvokeOptions): PromiseOrValue<ModelProcessResult<ChatModelOutput>>;
 private getReasoningEffort;
 private _process;
 private getParallelToolCalls;
@@ -143,10 +143,10 @@ declare class OpenAIChatModel extends ChatModel {
 private requestStructuredOutput;
 private extractResultFromStream;
 /**
-
-
-
-
+* Controls how optional fields are handled in JSON schema conversion
+* - "anyOf": All fields are required but can be null (default)
+* - "optional": Fields marked as optional in schema remain optional
+*/
 protected optionalFieldMode?: "anyOf" | "optional";
 protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown>;
 }
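The `optionalFieldMode` comment above describes how `jsonSchemaToOpenAIJsonSchema` rewrites schemas for OpenAI structured outputs. The standalone sketch below reproduces the default "anyOf" behaviour from the bundled source embedded in this diff's source maps: every property is listed as required, and properties that were optional gain a null branch. Function and variable names here are illustrative.

```typescript
type JsonSchema = Record<string, unknown>;

// Default "anyOf" mode: OpenAI structured outputs require every property to be
// listed in `required`, so optional properties become nullable instead.
function toOpenAIJsonSchema(schema: JsonSchema): JsonSchema {
  if (schema?.type === "object") {
    const s = schema as { required?: string[]; properties: Record<string, JsonSchema> };
    return {
      ...schema,
      properties: Object.fromEntries(
        Object.entries(s.properties).map(([key, value]) => [
          key,
          s.required?.includes(key)
            ? toOpenAIJsonSchema(value)
            : { anyOf: [toOpenAIJsonSchema(value), { type: ["null"] }] },
        ]),
      ),
      required: Object.keys(s.properties), // all keys become required
    };
  }
  if (schema?.type === "array") {
    const { items } = schema as { items: JsonSchema };
    return { ...schema, items: toOpenAIJsonSchema(items) };
  }
  return schema;
}

// Example: an optional `name` field becomes required-but-nullable.
console.log(
  toOpenAIJsonSchema({
    type: "object",
    properties: { name: { type: "string" } },
    required: [],
  }),
);
```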
package/dist/openai-chat-model.d.cts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"openai-chat-model.d.cts","names":[],"sources":["../src/openai-chat-model.ts"],"mappings":";;;;;;;
+
{"version":3,"file":"openai-chat-model.d.cts","names":[],"sources":["../src/openai-chat-model.ts"],"mappings":";;;;;;;UAwCiB,2BAAA;EACf,+BAAA;EACA,4BAAA;EACA,8BAAA;EACA,yBAAA;EACA,4BAAA;EACA,qBAAA;EACA,mBAAA;AAAA;;;;UAWe,sBAAA,SAA+B,gBAAA;EAX3B;;AAWrB;;;EAME,MAAA;EAYgB;;;;;EALhB,OAAA;EAAA;;;EAKA,aAAA,GAAgB,OAAA,CAAQ,aAAA;AAAA;;AAM1B;;cAAa,4BAAA,EAA4B,CAAA,CAAA,SAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAmC5B,eAAA,SAAwB,SAAA;EACP,OAAA,GAAU,sBAAA;cAAV,OAAA,GAAU,sBAAA;;;;YAW5B,OAAA,GAAU,QAAA;EAAA,UAEV,aAAA;EAAA,UACA,aAAA;EAAA,UACA,+BAAA;EAAA,UACA,8BAAA;EAAA,UACS,yBAAA;EAAA,UACT,4BAAA;EAAA,UACA,qBAAA;EAAA,UACA,mBAAA;EAAA,IAEN,MAAA,CAAA,GAAM,QAAA;EAAA,IAeG,UAAA,CAAA;;;;;;;;;;EAaJ,OAAA,CACP,KAAA,EAAO,cAAA,EACP,OAAA,EAAS,kBAAA,GACR,cAAA,CAAe,kBAAA,CAAmB,eAAA;EAAA,QAI7B,kBAAA;EAAA,QAcM,QAAA;EAAA,QAuEN,oBAAA;EAAA,UASQ,cAAA,CAAe,KAAA,EAAO,cAAA,GAAiB,OAAA,CAAQ,0BAAA;EAAA,QAiBjD,oBAAA;EAAA,QA+BA,uBAAA;EAAA,QAmBA,uBAAA;;;;;;YAoJJ,iBAAA;EAAA,UAEA,4BAAA,CAA6B,MAAA,EAAQ,MAAA,oBAA0B,MAAA;AAAA;;;;iBA+CrD,yBAAA,CACpB,QAAA,EAAU,qBAAA,KACT,OAAA,CAAQ,0BAAA;;;;iBAmDK,mBAAA,CACd,KAAA,GAAQ,kBAAA,IACR,OAAA;EAAY,wBAAA;AAAA,IACX,kBAAA"}
package/dist/openai-chat-model.d.mts
CHANGED
@@ -1,5 +1,5 @@
-import {
-import { PromiseOrValue } from "@aigne/
+import { ChatModel, ChatModelInput, ChatModelInputMessage, ChatModelInputTool, ChatModelOptions, ChatModelOutput, ModelInvokeOptions, ModelProcessResult } from "@aigne/model-base";
+import { PromiseOrValue } from "@aigne/model-base/utils/type-utils";
 import { z } from "zod";
 import { ClientOptions, OpenAI as OpenAI$1 } from "openai";
 import { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
@@ -19,20 +19,20 @@ interface OpenAIChatModelCapabilities {
 */
 interface OpenAIChatModelOptions extends ChatModelOptions {
 /**
-
-
-
-
+* API key for OpenAI API
+*
+* If not provided, will look for OPENAI_API_KEY in environment variables
+*/
 apiKey?: string;
 /**
-
-
-
-
+* Base URL for OpenAI API
+*
+* Useful for proxies or alternate endpoints
+*/
 baseURL?: string;
 /**
-
-
+* Client options for OpenAI API
+*/
 clientOptions?: Partial<ClientOptions>;
 }
 /**
@@ -112,8 +112,8 @@ declare class OpenAIChatModel extends ChatModel {
 options?: OpenAIChatModelOptions | undefined;
 constructor(options?: OpenAIChatModelOptions | undefined);
 /**
-
-
+* @hidden
+*/
 protected _client?: OpenAI$1;
 protected apiKeyEnvName: string;
 protected apiKeyDefault: string | undefined;
@@ -130,11 +130,11 @@ declare class OpenAIChatModel extends ChatModel {
 model: string;
 };
 /**
-
-
-
-
-process(input: ChatModelInput, options:
+* Process the input and generate a response
+* @param input The input to process
+* @returns The generated response
+*/
+process(input: ChatModelInput, options: ModelInvokeOptions): PromiseOrValue<ModelProcessResult<ChatModelOutput>>;
 private getReasoningEffort;
 private _process;
 private getParallelToolCalls;
@@ -143,10 +143,10 @@ declare class OpenAIChatModel extends ChatModel {
 private requestStructuredOutput;
 private extractResultFromStream;
 /**
-
-
-
-
+* Controls how optional fields are handled in JSON schema conversion
+* - "anyOf": All fields are required but can be null (default)
+* - "optional": Fields marked as optional in schema remain optional
+*/
 protected optionalFieldMode?: "anyOf" | "optional";
 protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown>;
 }
package/dist/openai-chat-model.d.mts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"openai-chat-model.d.mts","names":[],"sources":["../src/openai-chat-model.ts"],"mappings":";;;;;;;
+
{"version":3,"file":"openai-chat-model.d.mts","names":[],"sources":["../src/openai-chat-model.ts"],"mappings":";;;;;;;UAwCiB,2BAAA;EACf,+BAAA;EACA,4BAAA;EACA,8BAAA;EACA,yBAAA;EACA,4BAAA;EACA,qBAAA;EACA,mBAAA;AAAA;;;;UAWe,sBAAA,SAA+B,gBAAA;EAX3B;;AAWrB;;;EAME,MAAA;EAYgB;;;;;EALhB,OAAA;EAAA;;;EAKA,aAAA,GAAgB,OAAA,CAAQ,aAAA;AAAA;;AAM1B;;cAAa,4BAAA,EAA4B,CAAA,CAAA,SAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAmC5B,eAAA,SAAwB,SAAA;EACP,OAAA,GAAU,sBAAA;cAAV,OAAA,GAAU,sBAAA;;;;YAW5B,OAAA,GAAU,QAAA;EAAA,UAEV,aAAA;EAAA,UACA,aAAA;EAAA,UACA,+BAAA;EAAA,UACA,8BAAA;EAAA,UACS,yBAAA;EAAA,UACT,4BAAA;EAAA,UACA,qBAAA;EAAA,UACA,mBAAA;EAAA,IAEN,MAAA,CAAA,GAAM,QAAA;EAAA,IAeG,UAAA,CAAA;;;;;;;;;;EAaJ,OAAA,CACP,KAAA,EAAO,cAAA,EACP,OAAA,EAAS,kBAAA,GACR,cAAA,CAAe,kBAAA,CAAmB,eAAA;EAAA,QAI7B,kBAAA;EAAA,QAcM,QAAA;EAAA,QAuEN,oBAAA;EAAA,UASQ,cAAA,CAAe,KAAA,EAAO,cAAA,GAAiB,OAAA,CAAQ,0BAAA;EAAA,QAiBjD,oBAAA;EAAA,QA+BA,uBAAA;EAAA,QAmBA,uBAAA;;;;;;YAoJJ,iBAAA;EAAA,UAEA,4BAAA,CAA6B,MAAA,EAAQ,MAAA,oBAA0B,MAAA;AAAA;;;;iBA+CrD,yBAAA,CACpB,QAAA,EAAU,qBAAA,KACT,OAAA,CAAQ,0BAAA;;;;iBAmDK,mBAAA,CACd,KAAA,GAAQ,kBAAA,IACR,OAAA;EAAY,wBAAA;AAAA,IACX,kBAAA"}
package/dist/openai-chat-model.mjs
CHANGED
@@ -1,10 +1,10 @@
 import { CustomOpenAI } from "./openai.mjs";
-import { ChatModel, STANDARD_ROLE_MAP, createRoleMapper, safeParseJSON } from "@aigne/core";
-import { logger } from "@aigne/core/utils/logger";
-import { mergeUsage } from "@aigne/core/utils/model-utils";
-import { getJsonOutputPrompt } from "@aigne/core/utils/prompts";
-import { agentResponseStreamToObject } from "@aigne/core/utils/stream-utils";
-import { checkArguments, isNonNullable } from "@aigne/core/utils/type-utils";
+import { ChatModel, STANDARD_ROLE_MAP, createRoleMapper, safeParseJSON } from "@aigne/model-base";
+import { logger } from "@aigne/model-base/utils/logger";
+import { mergeUsage } from "@aigne/model-base/utils/model-utils";
+import { getJsonOutputPrompt } from "@aigne/model-base/utils/prompts";
+import { modelResponseStreamToObject } from "@aigne/model-base/utils/stream-utils";
+import { checkArguments, isNonNullable } from "@aigne/model-base/utils/type-utils";
 import { v7 } from "@aigne/uuid";
 import { z } from "zod";

@@ -252,7 +252,7 @@ var OpenAIChatModel = class extends ChatModel {
 controller.error(error);
 }
 } });
-return streaming ? result : await agentResponseStreamToObject(result);
+return streaming ? result : await modelResponseStreamToObject(result);
 }
 /**
 * Controls how optional fields are handled in JSON schema conversion
package/dist/openai-chat-model.mjs.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"openai-chat-model.mjs","names":["i"],"sources":["../src/openai-chat-model.ts"],"sourcesContent":["import {\n type AgentInvokeOptions,\n type AgentProcessResult,\n type AgentResponse,\n type AgentResponseChunk,\n ChatModel,\n type ChatModelInput,\n type ChatModelInputMessage,\n type ChatModelInputOptions,\n type ChatModelInputTool,\n type ChatModelOptions,\n type ChatModelOutput,\n type ChatModelOutputUsage,\n createRoleMapper,\n STANDARD_ROLE_MAP,\n safeParseJSON,\n} from \"@aigne/core\";\nimport { logger } from \"@aigne/core/utils/logger\";\nimport { mergeUsage } from \"@aigne/core/utils/model-utils\";\nimport { getJsonOutputPrompt } from \"@aigne/core/utils/prompts\";\nimport { agentResponseStreamToObject } from \"@aigne/core/utils/stream-utils\";\nimport { checkArguments, isNonNullable, type PromiseOrValue } from \"@aigne/core/utils/type-utils\";\nimport { v7 } from \"@aigne/uuid\";\nimport type { ClientOptions, OpenAI } from \"openai\";\nimport type {\n ChatCompletionContentPart,\n ChatCompletionMessageParam,\n ChatCompletionTool,\n ResponseFormatJSONSchema,\n} from \"openai/resources\";\nimport type { Stream } from \"openai/streaming.js\";\nimport { z } from \"zod\";\nimport { CustomOpenAI } from \"./openai.js\";\n\nconst CHAT_MODEL_OPENAI_DEFAULT_MODEL = \"gpt-4o-mini\";\n\nexport interface OpenAIChatModelCapabilities {\n supportsNativeStructuredOutputs: boolean;\n supportsEndWithSystemMessage: boolean;\n supportsToolsUseWithJsonSchema: boolean;\n supportsParallelToolCalls: boolean;\n supportsToolsEmptyParameters: boolean;\n supportsToolStreaming: boolean;\n supportsTemperature: boolean;\n}\n\nconst OPENAI_CHAT_MODEL_CAPABILITIES: Record<string, Partial<OpenAIChatModelCapabilities>> = {\n \"o4-mini\": { supportsParallelToolCalls: false, supportsTemperature: false },\n \"o3-mini\": { supportsParallelToolCalls: false, supportsTemperature: false },\n};\n\n/**\n * Configuration options for OpenAI Chat Model\n */\nexport interface OpenAIChatModelOptions extends ChatModelOptions {\n /**\n * API key for OpenAI API\n *\n * If not provided, will look for OPENAI_API_KEY in environment variables\n */\n apiKey?: string;\n\n /**\n * Base URL for OpenAI API\n *\n * Useful for proxies or alternate endpoints\n */\n baseURL?: string;\n\n /**\n * Client options for OpenAI API\n */\n clientOptions?: Partial<ClientOptions>;\n}\n\n/**\n * @hidden\n */\nexport const openAIChatModelOptionsSchema = z.object({\n apiKey: z.string().optional(),\n baseURL: z.string().optional(),\n model: z.string().optional(),\n modelOptions: z\n .object({\n model: z.string().optional(),\n temperature: z.number().optional(),\n topP: z.number().optional(),\n frequencyPenalty: z.number().optional(),\n presencePenalty: z.number().optional(),\n parallelToolCalls: z.boolean().optional().default(true),\n })\n .optional(),\n});\n\n/**\n * Implementation of the ChatModel interface for OpenAI's API\n *\n * This model provides access to OpenAI's capabilities including:\n * - Text generation\n * - Tool use with parallel tool calls\n * - JSON structured output\n * - Image understanding\n *\n * Default model: 'gpt-4o-mini'\n *\n * @example\n * Here's how to create and use an OpenAI chat model:\n * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}\n *\n * @example\n * Here's an example with streaming response:\n * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}\n */\nexport class OpenAIChatModel extends ChatModel {\n constructor(public override options?: 
OpenAIChatModelOptions) {\n super();\n if (options) checkArguments(this.name, openAIChatModelOptionsSchema, options);\n\n const preset = options?.model ? OPENAI_CHAT_MODEL_CAPABILITIES[options.model] : undefined;\n Object.assign(this, preset);\n }\n\n /**\n * @hidden\n */\n protected _client?: OpenAI;\n\n protected apiKeyEnvName = \"OPENAI_API_KEY\";\n protected apiKeyDefault: string | undefined;\n protected supportsNativeStructuredOutputs = true;\n protected supportsToolsUseWithJsonSchema = true;\n protected override supportsParallelToolCalls = true;\n protected supportsToolsEmptyParameters = true;\n protected supportsToolStreaming = true;\n protected supportsTemperature = true;\n\n get client() {\n const { apiKey, url } = this.credential;\n if (!apiKey)\n throw new Error(\n `${this.name} requires an API key. Please provide it via \\`options.apiKey\\`, or set the \\`${this.apiKeyEnvName}\\` environment variable`,\n );\n\n this._client ??= new CustomOpenAI({\n baseURL: url,\n apiKey,\n ...this.options?.clientOptions,\n });\n return this._client;\n }\n\n override get credential() {\n return {\n url: this.options?.baseURL || process.env.OPENAI_BASE_URL,\n apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName] || this.apiKeyDefault,\n model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,\n };\n }\n\n /**\n * Process the input and generate a response\n * @param input The input to process\n * @returns The generated response\n */\n override process(\n input: ChatModelInput,\n options: AgentInvokeOptions,\n ): PromiseOrValue<AgentProcessResult<ChatModelOutput>> {\n return this._process(input, options);\n }\n\n private getReasoningEffort(\n effort: ChatModelInputOptions[\"reasoningEffort\"],\n ): Exclude<ChatModelInputOptions[\"reasoningEffort\"], number> {\n if (typeof effort === \"number\") {\n if (effort > 5000) return \"high\";\n if (effort > 1000) return \"medium\";\n if (effort > 500) return \"low\";\n if (effort > 0) return \"minimal\";\n return undefined;\n }\n\n return effort;\n }\n\n private async _process(\n input: ChatModelInput,\n _options: AgentInvokeOptions,\n ): Promise<AgentResponse<ChatModelOutput>> {\n const { modelOptions = {} } = input;\n\n const messages = await this.getRunMessages(input);\n const model = modelOptions?.model || this.credential.model;\n\n const body: OpenAI.Chat.ChatCompletionCreateParams = {\n model,\n temperature: this.supportsTemperature ? 
modelOptions.temperature : undefined,\n top_p: modelOptions.topP,\n frequency_penalty: modelOptions.frequencyPenalty,\n presence_penalty: modelOptions.presencePenalty,\n messages,\n stream_options: { include_usage: true },\n stream: true,\n reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),\n };\n\n if (model.includes(\"gpt-5\") || model.includes(\"o1-\")) {\n delete body.temperature;\n delete body.top_p;\n }\n\n // For models that do not support tools use with JSON schema in same request,\n // so we need to handle the case where tools are not used and responseFormat is json\n if (!input.tools?.length && input.responseFormat?.type === \"json_schema\") {\n return await this.requestStructuredOutput(body, input.responseFormat);\n }\n\n const { jsonMode, responseFormat } = await this.getRunResponseFormat(input);\n const stream = (await this.client.chat.completions.create({\n ...body,\n tools: toolsFromInputTools(input.tools, {\n addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,\n }),\n tool_choice: input.toolChoice,\n parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),\n response_format: responseFormat,\n })) as unknown as Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;\n\n if (input.responseFormat?.type !== \"json_schema\") {\n return await this.extractResultFromStream(body, stream, false, true);\n }\n\n const result = await this.extractResultFromStream(body, stream, jsonMode);\n // Just return the result if it has tool calls\n if (result.toolCalls?.length || result.json) return result;\n\n // Try to parse the text response as JSON\n // If it matches the json_schema, return it as json\n const json = safeParseJSON(result.text || \"\");\n const validated = this.validateJsonSchema(input.responseFormat.jsonSchema.schema, json, {\n safe: true,\n });\n if (validated.success) {\n return { ...result, json: validated.data, text: undefined };\n }\n logger.warn(\n `${this.name}: Text response does not match JSON schema, trying to use tool to extract json `,\n {\n text: result.text,\n },\n );\n\n const output = await this.requestStructuredOutput(body, input.responseFormat);\n return { ...output, usage: mergeUsage(result.usage, output.usage) };\n }\n\n private getParallelToolCalls(\n input: ChatModelInput,\n modelOptions: ChatModelInputOptions,\n ): boolean | undefined {\n if (!this.supportsParallelToolCalls) return undefined;\n if (!input.tools?.length) return undefined;\n return modelOptions.parallelToolCalls;\n }\n\n protected async getRunMessages(input: ChatModelInput): Promise<ChatCompletionMessageParam[]> {\n const messages = await contentsFromInputMessages(input.messages);\n\n if (input.responseFormat?.type === \"json_schema\") {\n if (\n !this.supportsNativeStructuredOutputs ||\n (!this.supportsToolsUseWithJsonSchema && input.tools?.length)\n ) {\n messages.unshift({\n role: \"system\",\n content: getJsonOutputPrompt(input.responseFormat.jsonSchema.schema),\n });\n }\n }\n return messages;\n }\n\n private async getRunResponseFormat(input: Partial<ChatModelInput>): Promise<{\n jsonMode: boolean;\n responseFormat: ResponseFormatJSONSchema | { type: \"json_object\" } | undefined;\n }> {\n if (!this.supportsToolsUseWithJsonSchema && input.tools?.length)\n return { jsonMode: false, responseFormat: undefined };\n\n if (!this.supportsNativeStructuredOutputs) {\n const jsonMode = input.responseFormat?.type === \"json_schema\";\n return {\n jsonMode,\n responseFormat: jsonMode ? 
{ type: \"json_object\" } : undefined,\n };\n }\n\n if (input.responseFormat?.type === \"json_schema\") {\n return {\n jsonMode: true,\n responseFormat: {\n type: \"json_schema\",\n json_schema: {\n ...input.responseFormat.jsonSchema,\n schema: this.jsonSchemaToOpenAIJsonSchema(input.responseFormat.jsonSchema.schema),\n },\n },\n };\n }\n\n return { jsonMode: false, responseFormat: undefined };\n }\n\n private async requestStructuredOutput(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n responseFormat: ChatModelInput[\"responseFormat\"],\n ): Promise<ChatModelOutput> {\n if (responseFormat?.type !== \"json_schema\") {\n throw new Error(\"Expected json_schema response format\");\n }\n\n const { jsonMode, responseFormat: resolvedResponseFormat } = await this.getRunResponseFormat({\n responseFormat,\n });\n const res = (await this.client.chat.completions.create({\n ...body,\n response_format: resolvedResponseFormat,\n })) as unknown as Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;\n\n return this.extractResultFromStream(body, res, jsonMode);\n }\n\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode: boolean | undefined,\n streaming: true,\n ): Promise<ReadableStream<AgentResponseChunk<ChatModelOutput>>>;\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode?: boolean,\n streaming?: false,\n ): Promise<ChatModelOutput>;\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode?: boolean,\n streaming?: boolean,\n ): Promise<ReadableStream<AgentResponseChunk<ChatModelOutput>> | ChatModelOutput> {\n const result = new ReadableStream<AgentResponseChunk<ChatModelOutput>>({\n start: async (controller) => {\n try {\n controller.enqueue({\n delta: {\n json: {\n modelOptions: {\n reasoningEffort: body.reasoning_effort,\n },\n },\n },\n });\n\n let text = \"\";\n let refusal = \"\";\n const toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[] = [];\n let model: string | undefined;\n\n for await (const chunk of stream) {\n const choice = chunk.choices?.[0];\n const delta = choice?.delta;\n\n if (!model) {\n model = chunk.model;\n controller.enqueue({\n delta: {\n json: {\n model,\n },\n },\n });\n }\n\n if (delta?.tool_calls?.length) {\n for (const call of delta.tool_calls) {\n if (this.supportsToolStreaming && call.index !== undefined) {\n handleToolCallDelta(toolCalls, call);\n } else {\n handleCompleteToolCall(toolCalls, call);\n }\n }\n }\n\n if (delta && \"reasoning\" in delta && typeof delta.reasoning === \"string\") {\n controller.enqueue({ delta: { text: { thoughts: delta.reasoning } } });\n }\n\n if (delta?.content) {\n text += delta.content;\n if (!jsonMode) {\n controller.enqueue({\n delta: {\n text: {\n text: delta.content,\n },\n },\n });\n }\n }\n\n if (delta?.refusal) {\n refusal += delta.refusal;\n }\n\n if (chunk.usage) {\n const usage: ChatModelOutputUsage = {\n inputTokens: chunk.usage.prompt_tokens,\n outputTokens: chunk.usage.completion_tokens,\n };\n\n // Parse cache statistics if available\n const inputDetails = chunk.usage.prompt_tokens_details;\n if (inputDetails?.cached_tokens) {\n usage.cacheReadInputTokens = inputDetails.cached_tokens;\n }\n\n controller.enqueue({\n delta: 
{\n json: {\n usage,\n },\n },\n });\n }\n }\n\n if (jsonMode && text) {\n controller.enqueue({\n delta: {\n json: {\n json: safeParseJSON(text),\n },\n },\n });\n }\n\n if (toolCalls.length) {\n controller.enqueue({\n delta: {\n json: {\n toolCalls: toolCalls.map(({ args, ...c }) => ({\n ...c,\n function: { ...c.function, arguments: args ? safeParseJSON(args) : {} },\n })),\n },\n },\n });\n }\n\n if (refusal) {\n controller.error(new Error(`Got refusal from LLM: ${refusal}`));\n }\n\n controller.close();\n } catch (error) {\n controller.error(error);\n }\n },\n });\n\n return streaming ? result : await agentResponseStreamToObject(result);\n }\n\n /**\n * Controls how optional fields are handled in JSON schema conversion\n * - \"anyOf\": All fields are required but can be null (default)\n * - \"optional\": Fields marked as optional in schema remain optional\n */\n protected optionalFieldMode?: \"anyOf\" | \"optional\" = \"anyOf\";\n\n protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown> {\n if (schema?.type === \"object\") {\n const s = schema as {\n required?: string[];\n properties: Record<string, unknown>;\n };\n\n const required = this.optionalFieldMode === \"anyOf\" ? Object.keys(s.properties) : s.required;\n\n return {\n ...schema,\n properties: Object.fromEntries(\n Object.entries(s.properties).map(([key, value]) => {\n const valueSchema = this.jsonSchemaToOpenAIJsonSchema(value as Record<string, unknown>);\n\n // NOTE: All fields must be required https://platform.openai.com/docs/guides/structured-outputs/all-fields-must-be-required\n return [\n key,\n this.optionalFieldMode === \"optional\" || s.required?.includes(key)\n ? valueSchema\n : { anyOf: [valueSchema, { type: [\"null\"] }] },\n ];\n }),\n ),\n required,\n };\n }\n\n if (schema?.type === \"array\") {\n const { items } = schema as { items: Record<string, unknown> };\n\n return {\n ...schema,\n items: this.jsonSchemaToOpenAIJsonSchema(items),\n };\n }\n\n return schema;\n }\n}\n\n// Create role mapper for OpenAI (uses standard mapping)\nconst mapRole = createRoleMapper(STANDARD_ROLE_MAP);\n\n/**\n * @hidden\n */\nexport async function contentsFromInputMessages(\n messages: ChatModelInputMessage[],\n): Promise<ChatCompletionMessageParam[]> {\n return Promise.all(\n messages.map(\n async (i) =>\n ({\n role: mapRole(i.role),\n content:\n typeof i.content === \"string\"\n ? i.content\n : i.content &&\n (\n await Promise.all(\n i.content.map<Promise<ChatCompletionContentPart>>(async (c) => {\n switch (c.type) {\n case \"text\":\n return { type: \"text\", text: c.text };\n case \"url\":\n return { type: \"image_url\", image_url: { url: c.url } };\n case \"file\":\n return {\n type: \"image_url\",\n image_url: {\n url: `data:${c.mimeType || \"image/png\"};base64,${c.data}`,\n },\n };\n case \"local\": {\n throw new Error(\n `Unsupported local file: ${c.path}, it should be converted to base64 at ChatModel`,\n );\n }\n }\n }),\n )\n ).filter(isNonNullable),\n tool_calls: i.toolCalls?.map((i) => ({\n ...i,\n function: {\n ...i.function,\n arguments: JSON.stringify(i.function.arguments),\n },\n })),\n tool_call_id: i.toolCallId,\n name: i.name,\n }) as ChatCompletionMessageParam,\n ),\n );\n}\n\n/**\n * @hidden\n */\nexport function toolsFromInputTools(\n tools?: ChatModelInputTool[],\n options?: { addTypeToEmptyParameters?: boolean },\n): ChatCompletionTool[] | undefined {\n return tools?.length\n ? 
tools.map((i) => {\n const parameters = i.function.parameters as Record<string, unknown>;\n if (options?.addTypeToEmptyParameters && Object.keys(parameters).length === 0) {\n parameters.type = \"object\";\n }\n return {\n type: \"function\",\n function: {\n name: i.function.name,\n description: i.function.description,\n parameters,\n },\n };\n })\n : undefined;\n}\n\nfunction handleToolCallDelta(\n toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[],\n call: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall & {\n index: number;\n },\n) {\n toolCalls[call.index] ??= {\n id: call.id || v7(),\n type: \"function\" as const,\n function: { name: \"\", arguments: {} },\n args: \"\",\n };\n const c = toolCalls[call.index];\n if (!c) throw new Error(\"Tool call not found\");\n\n if (call.type) c.type = call.type;\n\n c.function.name = c.function.name + (call.function?.name || \"\");\n c.args = c.args.concat(call.function?.arguments || \"\");\n}\n\nfunction handleCompleteToolCall(\n toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[],\n call: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall,\n) {\n toolCalls.push({\n id: call.id || v7(),\n type: \"function\" as const,\n function: {\n name: call.function?.name || \"\",\n arguments: safeParseJSON(call.function?.arguments || \"{}\"),\n },\n args: call.function?.arguments || \"\",\n });\n}\n\n// safeParseJSON is now imported from @aigne/core\n"],"mappings":";;;;;;;;;;;AAkCA,MAAM,kCAAkC;AAYxC,MAAM,iCAAuF;CAC3F,WAAW;EAAE,2BAA2B;EAAO,qBAAqB;EAAO;CAC3E,WAAW;EAAE,2BAA2B;EAAO,qBAAqB;EAAO;CAC5E;;;;AA6BD,MAAa,+BAA+B,EAAE,OAAO;CACnD,QAAQ,EAAE,QAAQ,CAAC,UAAU;CAC7B,SAAS,EAAE,QAAQ,CAAC,UAAU;CAC9B,OAAO,EAAE,QAAQ,CAAC,UAAU;CAC5B,cAAc,EACX,OAAO;EACN,OAAO,EAAE,QAAQ,CAAC,UAAU;EAC5B,aAAa,EAAE,QAAQ,CAAC,UAAU;EAClC,MAAM,EAAE,QAAQ,CAAC,UAAU;EAC3B,kBAAkB,EAAE,QAAQ,CAAC,UAAU;EACvC,iBAAiB,EAAE,QAAQ,CAAC,UAAU;EACtC,mBAAmB,EAAE,SAAS,CAAC,UAAU,CAAC,QAAQ,KAAK;EACxD,CAAC,CACD,UAAU;CACd,CAAC;;;;;;;;;;;;;;;;;;;;AAqBF,IAAa,kBAAb,cAAqC,UAAU;CAC7C,YAAY,AAAgB,SAAkC;AAC5D,SAAO;EADmB;AAE1B,MAAI,QAAS,gBAAe,KAAK,MAAM,8BAA8B,QAAQ;EAE7E,MAAM,SAAS,SAAS,QAAQ,+BAA+B,QAAQ,SAAS;AAChF,SAAO,OAAO,MAAM,OAAO;;;;;CAM7B,AAAU;CAEV,AAAU,gBAAgB;CAC1B,AAAU;CACV,AAAU,kCAAkC;CAC5C,AAAU,iCAAiC;CAC3C,AAAmB,4BAA4B;CAC/C,AAAU,+BAA+B;CACzC,AAAU,wBAAwB;CAClC,AAAU,sBAAsB;CAEhC,IAAI,SAAS;EACX,MAAM,EAAE,QAAQ,QAAQ,KAAK;AAC7B,MAAI,CAAC,OACH,OAAM,IAAI,MACR,GAAG,KAAK,KAAK,+EAA+E,KAAK,cAAc,yBAChH;AAEH,OAAK,YAAY,IAAI,aAAa;GAChC,SAAS;GACT;GACA,GAAG,KAAK,SAAS;GAClB,CAAC;AACF,SAAO,KAAK;;CAGd,IAAa,aAAa;AACxB,SAAO;GACL,KAAK,KAAK,SAAS,WAAW,QAAQ,IAAI;GAC1C,QAAQ,KAAK,SAAS,UAAU,QAAQ,IAAI,KAAK,kBAAkB,KAAK;GACxE,OAAO,KAAK,SAAS,SAAS;GAC/B;;;;;;;CAQH,AAAS,QACP,OACA,SACqD;AACrD,SAAO,KAAK,SAAS,OAAO,QAAQ;;CAGtC,AAAQ,mBACN,QAC2D;AAC3D,MAAI,OAAO,WAAW,UAAU;AAC9B,OAAI,SAAS,IAAM,QAAO;AAC1B,OAAI,SAAS,IAAM,QAAO;AAC1B,OAAI,SAAS,IAAK,QAAO;AACzB,OAAI,SAAS,EAAG,QAAO;AACvB;;AAGF,SAAO;;CAGT,MAAc,SACZ,OACA,UACyC;EACzC,MAAM,EAAE,eAAe,EAAE,KAAK;EAE9B,MAAM,WAAW,MAAM,KAAK,eAAe,MAAM;EACjD,MAAM,QAAQ,cAAc,SAAS,KAAK,WAAW;EAErD,MAAM,OAA+C;GACnD;GACA,aAAa,KAAK,sBAAsB,aAAa,cAAc;GACnE,OAAO,aAAa;GACpB,mBAAmB,aAAa;GAChC,kBAAkB,aAAa;GAC/B;GACA,gBAAgB,EAAE,eAAe,MAAM;GACvC,QAAQ;GACR,kBAAkB,KAAK,mBAAmB,aAAa,gBAAgB;GACxE;AAED,MAAI,MAAM,SAAS,QAAQ,IAAI,MAAM,SAAS,MAAM,EAAE;AACpD,UAAO,KAAK;AACZ,UAAO,KAAK;;AAKd,MAAI,CAAC,MAAM,OAAO,UAAU,MAAM,gBAAgB,SAAS,cACzD,QAAO,MAAM,KAAK,wBAAwB,MAAM,MAAM,eAAe;EAGvE,MAAM,EAAE,UAAU,mBAAmB,MAAM,KAAK,qBAAqB,MAAM;EAC3E,MAAM,SAAU,MAAM,KAAK,O
AAO,KAAK,YAAY,OAAO;GACxD,GAAG;GACH,OAAO,oBAAoB,MAAM,OAAO,EACtC,0BAA0B,CAAC,KAAK,8BACjC,CAAC;GACF,aAAa,MAAM;GACnB,qBAAqB,KAAK,qBAAqB,OAAO,aAAa;GACnE,iBAAiB;GAClB,CAAC;AAEF,MAAI,MAAM,gBAAgB,SAAS,cACjC,QAAO,MAAM,KAAK,wBAAwB,MAAM,QAAQ,OAAO,KAAK;EAGtE,MAAM,SAAS,MAAM,KAAK,wBAAwB,MAAM,QAAQ,SAAS;AAEzE,MAAI,OAAO,WAAW,UAAU,OAAO,KAAM,QAAO;EAIpD,MAAM,OAAO,cAAc,OAAO,QAAQ,GAAG;EAC7C,MAAM,YAAY,KAAK,mBAAmB,MAAM,eAAe,WAAW,QAAQ,MAAM,EACtF,MAAM,MACP,CAAC;AACF,MAAI,UAAU,QACZ,QAAO;GAAE,GAAG;GAAQ,MAAM,UAAU;GAAM,MAAM;GAAW;AAE7D,SAAO,KACL,GAAG,KAAK,KAAK,kFACb,EACE,MAAM,OAAO,MACd,CACF;EAED,MAAM,SAAS,MAAM,KAAK,wBAAwB,MAAM,MAAM,eAAe;AAC7E,SAAO;GAAE,GAAG;GAAQ,OAAO,WAAW,OAAO,OAAO,OAAO,MAAM;GAAE;;CAGrE,AAAQ,qBACN,OACA,cACqB;AACrB,MAAI,CAAC,KAAK,0BAA2B,QAAO;AAC5C,MAAI,CAAC,MAAM,OAAO,OAAQ,QAAO;AACjC,SAAO,aAAa;;CAGtB,MAAgB,eAAe,OAA8D;EAC3F,MAAM,WAAW,MAAM,0BAA0B,MAAM,SAAS;AAEhE,MAAI,MAAM,gBAAgB,SAAS,eACjC;OACE,CAAC,KAAK,mCACL,CAAC,KAAK,kCAAkC,MAAM,OAAO,OAEtD,UAAS,QAAQ;IACf,MAAM;IACN,SAAS,oBAAoB,MAAM,eAAe,WAAW,OAAO;IACrE,CAAC;;AAGN,SAAO;;CAGT,MAAc,qBAAqB,OAGhC;AACD,MAAI,CAAC,KAAK,kCAAkC,MAAM,OAAO,OACvD,QAAO;GAAE,UAAU;GAAO,gBAAgB;GAAW;AAEvD,MAAI,CAAC,KAAK,iCAAiC;GACzC,MAAM,WAAW,MAAM,gBAAgB,SAAS;AAChD,UAAO;IACL;IACA,gBAAgB,WAAW,EAAE,MAAM,eAAe,GAAG;IACtD;;AAGH,MAAI,MAAM,gBAAgB,SAAS,cACjC,QAAO;GACL,UAAU;GACV,gBAAgB;IACd,MAAM;IACN,aAAa;KACX,GAAG,MAAM,eAAe;KACxB,QAAQ,KAAK,6BAA6B,MAAM,eAAe,WAAW,OAAO;KAClF;IACF;GACF;AAGH,SAAO;GAAE,UAAU;GAAO,gBAAgB;GAAW;;CAGvD,MAAc,wBACZ,MACA,gBAC0B;AAC1B,MAAI,gBAAgB,SAAS,cAC3B,OAAM,IAAI,MAAM,uCAAuC;EAGzD,MAAM,EAAE,UAAU,gBAAgB,2BAA2B,MAAM,KAAK,qBAAqB,EAC3F,gBACD,CAAC;EACF,MAAM,MAAO,MAAM,KAAK,OAAO,KAAK,YAAY,OAAO;GACrD,GAAG;GACH,iBAAiB;GAClB,CAAC;AAEF,SAAO,KAAK,wBAAwB,MAAM,KAAK,SAAS;;CAe1D,MAAc,wBACZ,MACA,QACA,UACA,WACgF;EAChF,MAAM,SAAS,IAAI,eAAoD,EACrE,OAAO,OAAO,eAAe;AAC3B,OAAI;AACF,eAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,cAAc,EACZ,iBAAiB,KAAK,kBACvB,EACF,EACF,EACF,CAAC;IAEF,IAAI,OAAO;IACX,IAAI,UAAU;IACd,MAAM,YAEC,EAAE;IACT,IAAI;AAEJ,eAAW,MAAM,SAAS,QAAQ;KAEhC,MAAM,SADS,MAAM,UAAU,KACT;AAEtB,SAAI,CAAC,OAAO;AACV,cAAQ,MAAM;AACd,iBAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,OACD,EACF,EACF,CAAC;;AAGJ,SAAI,OAAO,YAAY,OACrB,MAAK,MAAM,QAAQ,MAAM,WACvB,KAAI,KAAK,yBAAyB,KAAK,UAAU,OAC/C,qBAAoB,WAAW,KAAK;SAEpC,wBAAuB,WAAW,KAAK;AAK7C,SAAI,SAAS,eAAe,SAAS,OAAO,MAAM,cAAc,SAC9D,YAAW,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE,UAAU,MAAM,WAAW,EAAE,EAAE,CAAC;AAGxE,SAAI,OAAO,SAAS;AAClB,cAAQ,MAAM;AACd,UAAI,CAAC,SACH,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,MAAM,MAAM,SACb,EACF,EACF,CAAC;;AAIN,SAAI,OAAO,QACT,YAAW,MAAM;AAGnB,SAAI,MAAM,OAAO;MACf,MAAM,QAA8B;OAClC,aAAa,MAAM,MAAM;OACzB,cAAc,MAAM,MAAM;OAC3B;MAGD,MAAM,eAAe,MAAM,MAAM;AACjC,UAAI,cAAc,cAChB,OAAM,uBAAuB,aAAa;AAG5C,iBAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,OACD,EACF,EACF,CAAC;;;AAIN,QAAI,YAAY,KACd,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,MAAM,cAAc,KAAK,EAC1B,EACF,EACF,CAAC;AAGJ,QAAI,UAAU,OACZ,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,WAAW,UAAU,KAAK,EAAE,MAAM,GAAG,SAAS;KAC5C,GAAG;KACH,UAAU;MAAE,GAAG,EAAE;MAAU,WAAW,OAAO,cAAc,KAAK,GAAG,EAAE;MAAE;KACxE,EAAE,EACJ,EACF,EACF,CAAC;AAGJ,QAAI,QACF,YAAW,sBAAM,IAAI,MAAM,yBAAyB,UAAU,CAAC;AAGjE,eAAW,OAAO;YACX,OAAO;AACd,eAAW,MAAM,MAAM;;KAG5B,CAAC;AAEF,SAAO,YAAY,SAAS,MAAM,4BAA4B,OAAO;;;;;;;CAQvE,AAAU,oBAA2C;CAErD,AAAU,6BAA6B,QAA0D;AAC/F,MAAI,QAAQ,SAAS,UAAU;GAC7B,MAAM,IAAI;GAKV,MAAM,WAAW,KAAK,sBAAsB,UAAU,OAAO,KAAK,EAAE,WAAW,GAAG,EAAE;AAEpF,UAAO;IACL,GAAG;IACH,YAAY,OAAO,YACjB,OAAO,QAAQ,EAAE,WAAW,CAAC,KAAK,CAAC,KAAK,WAAW;KACjD,MAAM,cAAc,KAAK,6BAA6B,MAAiC;AAGvF,YAAO,CACL,KACA,KAAK,sBAAsB,cAAc,EAAE,UAAU,SAAS,IAAI,GAC9D,cACA,EAAE,OAAO,CAAC,aAAa,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CACjD;MACD,C
ACH;IACD;IACD;;AAGH,MAAI,QAAQ,SAAS,SAAS;GAC5B,MAAM,EAAE,UAAU;AAElB,UAAO;IACL,GAAG;IACH,OAAO,KAAK,6BAA6B,MAAM;IAChD;;AAGH,SAAO;;;AAKX,MAAM,UAAU,iBAAiB,kBAAkB;;;;AAKnD,eAAsB,0BACpB,UACuC;AACvC,QAAO,QAAQ,IACb,SAAS,IACP,OAAO,OACJ;EACC,MAAM,QAAQ,EAAE,KAAK;EACrB,SACE,OAAO,EAAE,YAAY,WACjB,EAAE,UACF,EAAE,YAEA,MAAM,QAAQ,IACZ,EAAE,QAAQ,IAAwC,OAAO,MAAM;AAC7D,WAAQ,EAAE,MAAV;IACE,KAAK,OACH,QAAO;KAAE,MAAM;KAAQ,MAAM,EAAE;KAAM;IACvC,KAAK,MACH,QAAO;KAAE,MAAM;KAAa,WAAW,EAAE,KAAK,EAAE,KAAK;KAAE;IACzD,KAAK,OACH,QAAO;KACL,MAAM;KACN,WAAW,EACT,KAAK,QAAQ,EAAE,YAAY,YAAY,UAAU,EAAE,QACpD;KACF;IACH,KAAK,QACH,OAAM,IAAI,MACR,2BAA2B,EAAE,KAAK,iDACnC;;IAGL,CACH,EACD,OAAO,cAAc;EAC7B,YAAY,EAAE,WAAW,KAAK,SAAO;GACnC,GAAGA;GACH,UAAU;IACR,GAAGA,IAAE;IACL,WAAW,KAAK,UAAUA,IAAE,SAAS,UAAU;IAChD;GACF,EAAE;EACH,cAAc,EAAE;EAChB,MAAM,EAAE;EACT,EACJ,CACF;;;;;AAMH,SAAgB,oBACd,OACA,SACkC;AAClC,QAAO,OAAO,SACV,MAAM,KAAK,MAAM;EACf,MAAM,aAAa,EAAE,SAAS;AAC9B,MAAI,SAAS,4BAA4B,OAAO,KAAK,WAAW,CAAC,WAAW,EAC1E,YAAW,OAAO;AAEpB,SAAO;GACL,MAAM;GACN,UAAU;IACR,MAAM,EAAE,SAAS;IACjB,aAAa,EAAE,SAAS;IACxB;IACD;GACF;GACD,GACF;;AAGN,SAAS,oBACP,WAGA,MAGA;AACA,WAAU,KAAK,WAAW;EACxB,IAAI,KAAK,MAAM,IAAI;EACnB,MAAM;EACN,UAAU;GAAE,MAAM;GAAI,WAAW,EAAE;GAAE;EACrC,MAAM;EACP;CACD,MAAM,IAAI,UAAU,KAAK;AACzB,KAAI,CAAC,EAAG,OAAM,IAAI,MAAM,sBAAsB;AAE9C,KAAI,KAAK,KAAM,GAAE,OAAO,KAAK;AAE7B,GAAE,SAAS,OAAO,EAAE,SAAS,QAAQ,KAAK,UAAU,QAAQ;AAC5D,GAAE,OAAO,EAAE,KAAK,OAAO,KAAK,UAAU,aAAa,GAAG;;AAGxD,SAAS,uBACP,WAGA,MACA;AACA,WAAU,KAAK;EACb,IAAI,KAAK,MAAM,IAAI;EACnB,MAAM;EACN,UAAU;GACR,MAAM,KAAK,UAAU,QAAQ;GAC7B,WAAW,cAAc,KAAK,UAAU,aAAa,KAAK;GAC3D;EACD,MAAM,KAAK,UAAU,aAAa;EACnC,CAAC"}
+
{"version":3,"file":"openai-chat-model.mjs","names":["i"],"sources":["../src/openai-chat-model.ts"],"sourcesContent":["import {\n ChatModel,\n type ChatModelInput,\n type ChatModelInputMessage,\n type ChatModelInputOptions,\n type ChatModelInputTool,\n type ChatModelOptions,\n type ChatModelOutput,\n type ChatModelOutputUsage,\n createRoleMapper,\n type ModelInvokeOptions,\n type ModelProcessResult,\n type ModelResponse,\n type ModelResponseChunk,\n STANDARD_ROLE_MAP,\n safeParseJSON,\n} from \"@aigne/model-base\";\nimport { logger } from \"@aigne/model-base/utils/logger\";\nimport { mergeUsage } from \"@aigne/model-base/utils/model-utils\";\nimport { getJsonOutputPrompt } from \"@aigne/model-base/utils/prompts\";\nimport { modelResponseStreamToObject } from \"@aigne/model-base/utils/stream-utils\";\nimport {\n checkArguments,\n isNonNullable,\n type PromiseOrValue,\n} from \"@aigne/model-base/utils/type-utils\";\nimport { v7 } from \"@aigne/uuid\";\nimport type { ClientOptions, OpenAI } from \"openai\";\nimport type {\n ChatCompletionContentPart,\n ChatCompletionMessageParam,\n ChatCompletionTool,\n ResponseFormatJSONSchema,\n} from \"openai/resources\";\nimport type { Stream } from \"openai/streaming.js\";\nimport { z } from \"zod\";\nimport { CustomOpenAI } from \"./openai.js\";\n\nconst CHAT_MODEL_OPENAI_DEFAULT_MODEL = \"gpt-4o-mini\";\n\nexport interface OpenAIChatModelCapabilities {\n supportsNativeStructuredOutputs: boolean;\n supportsEndWithSystemMessage: boolean;\n supportsToolsUseWithJsonSchema: boolean;\n supportsParallelToolCalls: boolean;\n supportsToolsEmptyParameters: boolean;\n supportsToolStreaming: boolean;\n supportsTemperature: boolean;\n}\n\nconst OPENAI_CHAT_MODEL_CAPABILITIES: Record<string, Partial<OpenAIChatModelCapabilities>> = {\n \"o4-mini\": { supportsParallelToolCalls: false, supportsTemperature: false },\n \"o3-mini\": { supportsParallelToolCalls: false, supportsTemperature: false },\n};\n\n/**\n * Configuration options for OpenAI Chat Model\n */\nexport interface OpenAIChatModelOptions extends ChatModelOptions {\n /**\n * API key for OpenAI API\n *\n * If not provided, will look for OPENAI_API_KEY in environment variables\n */\n apiKey?: string;\n\n /**\n * Base URL for OpenAI API\n *\n * Useful for proxies or alternate endpoints\n */\n baseURL?: string;\n\n /**\n * Client options for OpenAI API\n */\n clientOptions?: Partial<ClientOptions>;\n}\n\n/**\n * @hidden\n */\nexport const openAIChatModelOptionsSchema = z.object({\n apiKey: z.string().optional(),\n baseURL: z.string().optional(),\n model: z.string().optional(),\n modelOptions: z\n .object({\n model: z.string().optional(),\n temperature: z.number().optional(),\n topP: z.number().optional(),\n frequencyPenalty: z.number().optional(),\n presencePenalty: z.number().optional(),\n parallelToolCalls: z.boolean().optional().default(true),\n })\n .optional(),\n});\n\n/**\n * Implementation of the ChatModel interface for OpenAI's API\n *\n * This model provides access to OpenAI's capabilities including:\n * - Text generation\n * - Tool use with parallel tool calls\n * - JSON structured output\n * - Image understanding\n *\n * Default model: 'gpt-4o-mini'\n *\n * @example\n * Here's how to create and use an OpenAI chat model:\n * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}\n *\n * @example\n * Here's an example with streaming response:\n * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-stream}\n */\nexport class OpenAIChatModel extends ChatModel {\n 
constructor(public override options?: OpenAIChatModelOptions) {\n super();\n if (options) checkArguments(this.name, openAIChatModelOptionsSchema, options);\n\n const preset = options?.model ? OPENAI_CHAT_MODEL_CAPABILITIES[options.model] : undefined;\n Object.assign(this, preset);\n }\n\n /**\n * @hidden\n */\n protected _client?: OpenAI;\n\n protected apiKeyEnvName = \"OPENAI_API_KEY\";\n protected apiKeyDefault: string | undefined;\n protected supportsNativeStructuredOutputs = true;\n protected supportsToolsUseWithJsonSchema = true;\n protected override supportsParallelToolCalls = true;\n protected supportsToolsEmptyParameters = true;\n protected supportsToolStreaming = true;\n protected supportsTemperature = true;\n\n get client() {\n const { apiKey, url } = this.credential;\n if (!apiKey)\n throw new Error(\n `${this.name} requires an API key. Please provide it via \\`options.apiKey\\`, or set the \\`${this.apiKeyEnvName}\\` environment variable`,\n );\n\n this._client ??= new CustomOpenAI({\n baseURL: url,\n apiKey,\n ...this.options?.clientOptions,\n });\n return this._client;\n }\n\n override get credential() {\n return {\n url: this.options?.baseURL || process.env.OPENAI_BASE_URL,\n apiKey: this.options?.apiKey || process.env[this.apiKeyEnvName] || this.apiKeyDefault,\n model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,\n };\n }\n\n /**\n * Process the input and generate a response\n * @param input The input to process\n * @returns The generated response\n */\n override process(\n input: ChatModelInput,\n options: ModelInvokeOptions,\n ): PromiseOrValue<ModelProcessResult<ChatModelOutput>> {\n return this._process(input, options);\n }\n\n private getReasoningEffort(\n effort: ChatModelInputOptions[\"reasoningEffort\"],\n ): Exclude<ChatModelInputOptions[\"reasoningEffort\"], number> {\n if (typeof effort === \"number\") {\n if (effort > 5000) return \"high\";\n if (effort > 1000) return \"medium\";\n if (effort > 500) return \"low\";\n if (effort > 0) return \"minimal\";\n return undefined;\n }\n\n return effort;\n }\n\n private async _process(\n input: ChatModelInput,\n _options: ModelInvokeOptions,\n ): Promise<ModelResponse<ChatModelOutput>> {\n const { modelOptions = {} } = input;\n\n const messages = await this.getRunMessages(input);\n const model = modelOptions?.model || this.credential.model;\n\n const body: OpenAI.Chat.ChatCompletionCreateParams = {\n model,\n temperature: this.supportsTemperature ? 
modelOptions.temperature : undefined,\n top_p: modelOptions.topP,\n frequency_penalty: modelOptions.frequencyPenalty,\n presence_penalty: modelOptions.presencePenalty,\n messages,\n stream_options: { include_usage: true },\n stream: true,\n reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),\n };\n\n if (model.includes(\"gpt-5\") || model.includes(\"o1-\")) {\n delete body.temperature;\n delete body.top_p;\n }\n\n // For models that do not support tools use with JSON schema in same request,\n // so we need to handle the case where tools are not used and responseFormat is json\n if (!input.tools?.length && input.responseFormat?.type === \"json_schema\") {\n return await this.requestStructuredOutput(body, input.responseFormat);\n }\n\n const { jsonMode, responseFormat } = await this.getRunResponseFormat(input);\n const stream = (await this.client.chat.completions.create({\n ...body,\n tools: toolsFromInputTools(input.tools, {\n addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,\n }),\n tool_choice: input.toolChoice,\n parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),\n response_format: responseFormat,\n })) as unknown as Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;\n\n if (input.responseFormat?.type !== \"json_schema\") {\n return await this.extractResultFromStream(body, stream, false, true);\n }\n\n const result = await this.extractResultFromStream(body, stream, jsonMode);\n // Just return the result if it has tool calls\n if (result.toolCalls?.length || result.json) return result;\n\n // Try to parse the text response as JSON\n // If it matches the json_schema, return it as json\n const json = safeParseJSON(result.text || \"\");\n const validated = this.validateJsonSchema(input.responseFormat.jsonSchema.schema, json, {\n safe: true,\n });\n if (validated.success) {\n return { ...result, json: validated.data, text: undefined };\n }\n logger.warn(\n `${this.name}: Text response does not match JSON schema, trying to use tool to extract json `,\n {\n text: result.text,\n },\n );\n\n const output = await this.requestStructuredOutput(body, input.responseFormat);\n return { ...output, usage: mergeUsage(result.usage, output.usage) };\n }\n\n private getParallelToolCalls(\n input: ChatModelInput,\n modelOptions: ChatModelInputOptions,\n ): boolean | undefined {\n if (!this.supportsParallelToolCalls) return undefined;\n if (!input.tools?.length) return undefined;\n return modelOptions.parallelToolCalls;\n }\n\n protected async getRunMessages(input: ChatModelInput): Promise<ChatCompletionMessageParam[]> {\n const messages = await contentsFromInputMessages(input.messages);\n\n if (input.responseFormat?.type === \"json_schema\") {\n if (\n !this.supportsNativeStructuredOutputs ||\n (!this.supportsToolsUseWithJsonSchema && input.tools?.length)\n ) {\n messages.unshift({\n role: \"system\",\n content: getJsonOutputPrompt(input.responseFormat.jsonSchema.schema),\n });\n }\n }\n return messages;\n }\n\n private async getRunResponseFormat(input: Partial<ChatModelInput>): Promise<{\n jsonMode: boolean;\n responseFormat: ResponseFormatJSONSchema | { type: \"json_object\" } | undefined;\n }> {\n if (!this.supportsToolsUseWithJsonSchema && input.tools?.length)\n return { jsonMode: false, responseFormat: undefined };\n\n if (!this.supportsNativeStructuredOutputs) {\n const jsonMode = input.responseFormat?.type === \"json_schema\";\n return {\n jsonMode,\n responseFormat: jsonMode ? 
{ type: \"json_object\" } : undefined,\n };\n }\n\n if (input.responseFormat?.type === \"json_schema\") {\n return {\n jsonMode: true,\n responseFormat: {\n type: \"json_schema\",\n json_schema: {\n ...input.responseFormat.jsonSchema,\n schema: this.jsonSchemaToOpenAIJsonSchema(input.responseFormat.jsonSchema.schema),\n },\n },\n };\n }\n\n return { jsonMode: false, responseFormat: undefined };\n }\n\n private async requestStructuredOutput(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n responseFormat: ChatModelInput[\"responseFormat\"],\n ): Promise<ChatModelOutput> {\n if (responseFormat?.type !== \"json_schema\") {\n throw new Error(\"Expected json_schema response format\");\n }\n\n const { jsonMode, responseFormat: resolvedResponseFormat } = await this.getRunResponseFormat({\n responseFormat,\n });\n const res = (await this.client.chat.completions.create({\n ...body,\n response_format: resolvedResponseFormat,\n })) as unknown as Stream<OpenAI.Chat.Completions.ChatCompletionChunk>;\n\n return this.extractResultFromStream(body, res, jsonMode);\n }\n\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode: boolean | undefined,\n streaming: true,\n ): Promise<ReadableStream<ModelResponseChunk<ChatModelOutput>>>;\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode?: boolean,\n streaming?: false,\n ): Promise<ChatModelOutput>;\n private async extractResultFromStream(\n body: OpenAI.Chat.ChatCompletionCreateParamsStreaming,\n stream: Stream<OpenAI.Chat.Completions.ChatCompletionChunk>,\n jsonMode?: boolean,\n streaming?: boolean,\n ): Promise<ReadableStream<ModelResponseChunk<ChatModelOutput>> | ChatModelOutput> {\n const result = new ReadableStream<ModelResponseChunk<ChatModelOutput>>({\n start: async (controller) => {\n try {\n controller.enqueue({\n delta: {\n json: {\n modelOptions: {\n reasoningEffort: body.reasoning_effort,\n },\n },\n },\n });\n\n let text = \"\";\n let refusal = \"\";\n const toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[] = [];\n let model: string | undefined;\n\n for await (const chunk of stream) {\n const choice = chunk.choices?.[0];\n const delta = choice?.delta;\n\n if (!model) {\n model = chunk.model;\n controller.enqueue({\n delta: {\n json: {\n model,\n },\n },\n });\n }\n\n if (delta?.tool_calls?.length) {\n for (const call of delta.tool_calls) {\n if (this.supportsToolStreaming && call.index !== undefined) {\n handleToolCallDelta(toolCalls, call);\n } else {\n handleCompleteToolCall(toolCalls, call);\n }\n }\n }\n\n if (delta && \"reasoning\" in delta && typeof delta.reasoning === \"string\") {\n controller.enqueue({ delta: { text: { thoughts: delta.reasoning } } });\n }\n\n if (delta?.content) {\n text += delta.content;\n if (!jsonMode) {\n controller.enqueue({\n delta: {\n text: {\n text: delta.content,\n },\n },\n });\n }\n }\n\n if (delta?.refusal) {\n refusal += delta.refusal;\n }\n\n if (chunk.usage) {\n const usage: ChatModelOutputUsage = {\n inputTokens: chunk.usage.prompt_tokens,\n outputTokens: chunk.usage.completion_tokens,\n };\n\n // Parse cache statistics if available\n const inputDetails = chunk.usage.prompt_tokens_details;\n if (inputDetails?.cached_tokens) {\n usage.cacheReadInputTokens = inputDetails.cached_tokens;\n }\n\n controller.enqueue({\n delta: 
{\n json: {\n usage,\n },\n },\n });\n }\n }\n\n if (jsonMode && text) {\n controller.enqueue({\n delta: {\n json: {\n json: safeParseJSON(text),\n },\n },\n });\n }\n\n if (toolCalls.length) {\n controller.enqueue({\n delta: {\n json: {\n toolCalls: toolCalls.map(({ args, ...c }) => ({\n ...c,\n function: { ...c.function, arguments: args ? safeParseJSON(args) : {} },\n })),\n },\n },\n });\n }\n\n if (refusal) {\n controller.error(new Error(`Got refusal from LLM: ${refusal}`));\n }\n\n controller.close();\n } catch (error) {\n controller.error(error);\n }\n },\n });\n\n return streaming ? result : await modelResponseStreamToObject(result);\n }\n\n /**\n * Controls how optional fields are handled in JSON schema conversion\n * - \"anyOf\": All fields are required but can be null (default)\n * - \"optional\": Fields marked as optional in schema remain optional\n */\n protected optionalFieldMode?: \"anyOf\" | \"optional\" = \"anyOf\";\n\n protected jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown> {\n if (schema?.type === \"object\") {\n const s = schema as {\n required?: string[];\n properties: Record<string, unknown>;\n };\n\n const required = this.optionalFieldMode === \"anyOf\" ? Object.keys(s.properties) : s.required;\n\n return {\n ...schema,\n properties: Object.fromEntries(\n Object.entries(s.properties).map(([key, value]) => {\n const valueSchema = this.jsonSchemaToOpenAIJsonSchema(value as Record<string, unknown>);\n\n // NOTE: All fields must be required https://platform.openai.com/docs/guides/structured-outputs/all-fields-must-be-required\n return [\n key,\n this.optionalFieldMode === \"optional\" || s.required?.includes(key)\n ? valueSchema\n : { anyOf: [valueSchema, { type: [\"null\"] }] },\n ];\n }),\n ),\n required,\n };\n }\n\n if (schema?.type === \"array\") {\n const { items } = schema as { items: Record<string, unknown> };\n\n return {\n ...schema,\n items: this.jsonSchemaToOpenAIJsonSchema(items),\n };\n }\n\n return schema;\n }\n}\n\n// Create role mapper for OpenAI (uses standard mapping)\nconst mapRole = createRoleMapper(STANDARD_ROLE_MAP);\n\n/**\n * @hidden\n */\nexport async function contentsFromInputMessages(\n messages: ChatModelInputMessage[],\n): Promise<ChatCompletionMessageParam[]> {\n return Promise.all(\n messages.map(\n async (i) =>\n ({\n role: mapRole(i.role),\n content:\n typeof i.content === \"string\"\n ? i.content\n : i.content &&\n (\n await Promise.all(\n i.content.map<Promise<ChatCompletionContentPart>>(async (c) => {\n switch (c.type) {\n case \"text\":\n return { type: \"text\", text: c.text };\n case \"url\":\n return { type: \"image_url\", image_url: { url: c.url } };\n case \"file\":\n return {\n type: \"image_url\",\n image_url: {\n url: `data:${c.mimeType || \"image/png\"};base64,${c.data}`,\n },\n };\n case \"local\": {\n throw new Error(\n `Unsupported local file: ${c.path}, it should be converted to base64 at ChatModel`,\n );\n }\n }\n }),\n )\n ).filter(isNonNullable),\n tool_calls: i.toolCalls?.map((i) => ({\n ...i,\n function: {\n ...i.function,\n arguments: JSON.stringify(i.function.arguments),\n },\n })),\n tool_call_id: i.toolCallId,\n name: i.name,\n }) as ChatCompletionMessageParam,\n ),\n );\n}\n\n/**\n * @hidden\n */\nexport function toolsFromInputTools(\n tools?: ChatModelInputTool[],\n options?: { addTypeToEmptyParameters?: boolean },\n): ChatCompletionTool[] | undefined {\n return tools?.length\n ? 
tools.map((i) => {\n const parameters = i.function.parameters as Record<string, unknown>;\n if (options?.addTypeToEmptyParameters && Object.keys(parameters).length === 0) {\n parameters.type = \"object\";\n }\n return {\n type: \"function\",\n function: {\n name: i.function.name,\n description: i.function.description,\n parameters,\n },\n };\n })\n : undefined;\n}\n\nfunction handleToolCallDelta(\n toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[],\n call: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall & {\n index: number;\n },\n) {\n toolCalls[call.index] ??= {\n id: call.id || v7(),\n type: \"function\" as const,\n function: { name: \"\", arguments: {} },\n args: \"\",\n };\n const c = toolCalls[call.index];\n if (!c) throw new Error(\"Tool call not found\");\n\n if (call.type) c.type = call.type;\n\n c.function.name = c.function.name + (call.function?.name || \"\");\n c.args = c.args.concat(call.function?.arguments || \"\");\n}\n\nfunction handleCompleteToolCall(\n toolCalls: (NonNullable<ChatModelOutput[\"toolCalls\"]>[number] & {\n args: string;\n })[],\n call: OpenAI.Chat.Completions.ChatCompletionChunk.Choice.Delta.ToolCall,\n) {\n toolCalls.push({\n id: call.id || v7(),\n type: \"function\" as const,\n function: {\n name: call.function?.name || \"\",\n arguments: safeParseJSON(call.function?.arguments || \"{}\"),\n },\n args: call.function?.arguments || \"\",\n });\n}\n\n// safeParseJSON is now imported from @aigne/model-base\n"],"mappings":";;;;;;;;;;;AAsCA,MAAM,kCAAkC;AAYxC,MAAM,iCAAuF;CAC3F,WAAW;EAAE,2BAA2B;EAAO,qBAAqB;EAAO;CAC3E,WAAW;EAAE,2BAA2B;EAAO,qBAAqB;EAAO;CAC5E;;;;AA6BD,MAAa,+BAA+B,EAAE,OAAO;CACnD,QAAQ,EAAE,QAAQ,CAAC,UAAU;CAC7B,SAAS,EAAE,QAAQ,CAAC,UAAU;CAC9B,OAAO,EAAE,QAAQ,CAAC,UAAU;CAC5B,cAAc,EACX,OAAO;EACN,OAAO,EAAE,QAAQ,CAAC,UAAU;EAC5B,aAAa,EAAE,QAAQ,CAAC,UAAU;EAClC,MAAM,EAAE,QAAQ,CAAC,UAAU;EAC3B,kBAAkB,EAAE,QAAQ,CAAC,UAAU;EACvC,iBAAiB,EAAE,QAAQ,CAAC,UAAU;EACtC,mBAAmB,EAAE,SAAS,CAAC,UAAU,CAAC,QAAQ,KAAK;EACxD,CAAC,CACD,UAAU;CACd,CAAC;;;;;;;;;;;;;;;;;;;;AAqBF,IAAa,kBAAb,cAAqC,UAAU;CAC7C,YAAY,AAAgB,SAAkC;AAC5D,SAAO;EADmB;AAE1B,MAAI,QAAS,gBAAe,KAAK,MAAM,8BAA8B,QAAQ;EAE7E,MAAM,SAAS,SAAS,QAAQ,+BAA+B,QAAQ,SAAS;AAChF,SAAO,OAAO,MAAM,OAAO;;;;;CAM7B,AAAU;CAEV,AAAU,gBAAgB;CAC1B,AAAU;CACV,AAAU,kCAAkC;CAC5C,AAAU,iCAAiC;CAC3C,AAAmB,4BAA4B;CAC/C,AAAU,+BAA+B;CACzC,AAAU,wBAAwB;CAClC,AAAU,sBAAsB;CAEhC,IAAI,SAAS;EACX,MAAM,EAAE,QAAQ,QAAQ,KAAK;AAC7B,MAAI,CAAC,OACH,OAAM,IAAI,MACR,GAAG,KAAK,KAAK,+EAA+E,KAAK,cAAc,yBAChH;AAEH,OAAK,YAAY,IAAI,aAAa;GAChC,SAAS;GACT;GACA,GAAG,KAAK,SAAS;GAClB,CAAC;AACF,SAAO,KAAK;;CAGd,IAAa,aAAa;AACxB,SAAO;GACL,KAAK,KAAK,SAAS,WAAW,QAAQ,IAAI;GAC1C,QAAQ,KAAK,SAAS,UAAU,QAAQ,IAAI,KAAK,kBAAkB,KAAK;GACxE,OAAO,KAAK,SAAS,SAAS;GAC/B;;;;;;;CAQH,AAAS,QACP,OACA,SACqD;AACrD,SAAO,KAAK,SAAS,OAAO,QAAQ;;CAGtC,AAAQ,mBACN,QAC2D;AAC3D,MAAI,OAAO,WAAW,UAAU;AAC9B,OAAI,SAAS,IAAM,QAAO;AAC1B,OAAI,SAAS,IAAM,QAAO;AAC1B,OAAI,SAAS,IAAK,QAAO;AACzB,OAAI,SAAS,EAAG,QAAO;AACvB;;AAGF,SAAO;;CAGT,MAAc,SACZ,OACA,UACyC;EACzC,MAAM,EAAE,eAAe,EAAE,KAAK;EAE9B,MAAM,WAAW,MAAM,KAAK,eAAe,MAAM;EACjD,MAAM,QAAQ,cAAc,SAAS,KAAK,WAAW;EAErD,MAAM,OAA+C;GACnD;GACA,aAAa,KAAK,sBAAsB,aAAa,cAAc;GACnE,OAAO,aAAa;GACpB,mBAAmB,aAAa;GAChC,kBAAkB,aAAa;GAC/B;GACA,gBAAgB,EAAE,eAAe,MAAM;GACvC,QAAQ;GACR,kBAAkB,KAAK,mBAAmB,aAAa,gBAAgB;GACxE;AAED,MAAI,MAAM,SAAS,QAAQ,IAAI,MAAM,SAAS,MAAM,EAAE;AACpD,UAAO,KAAK;AACZ,UAAO,KAAK;;AAKd,MAAI,CAAC,MAAM,OAAO,UAAU,MAAM,gBAAgB,SAAS,cACzD,QAAO,MAAM,KAAK,wBAAwB,MAAM,MAAM,eAAe;EAGvE,MAAM,EAAE,UAAU,mBAAmB,MAAM,KAAK,qBAAqB,MAAM;EAC3E,MAAM,SAAU,MAAM,
KAAK,OAAO,KAAK,YAAY,OAAO;GACxD,GAAG;GACH,OAAO,oBAAoB,MAAM,OAAO,EACtC,0BAA0B,CAAC,KAAK,8BACjC,CAAC;GACF,aAAa,MAAM;GACnB,qBAAqB,KAAK,qBAAqB,OAAO,aAAa;GACnE,iBAAiB;GAClB,CAAC;AAEF,MAAI,MAAM,gBAAgB,SAAS,cACjC,QAAO,MAAM,KAAK,wBAAwB,MAAM,QAAQ,OAAO,KAAK;EAGtE,MAAM,SAAS,MAAM,KAAK,wBAAwB,MAAM,QAAQ,SAAS;AAEzE,MAAI,OAAO,WAAW,UAAU,OAAO,KAAM,QAAO;EAIpD,MAAM,OAAO,cAAc,OAAO,QAAQ,GAAG;EAC7C,MAAM,YAAY,KAAK,mBAAmB,MAAM,eAAe,WAAW,QAAQ,MAAM,EACtF,MAAM,MACP,CAAC;AACF,MAAI,UAAU,QACZ,QAAO;GAAE,GAAG;GAAQ,MAAM,UAAU;GAAM,MAAM;GAAW;AAE7D,SAAO,KACL,GAAG,KAAK,KAAK,kFACb,EACE,MAAM,OAAO,MACd,CACF;EAED,MAAM,SAAS,MAAM,KAAK,wBAAwB,MAAM,MAAM,eAAe;AAC7E,SAAO;GAAE,GAAG;GAAQ,OAAO,WAAW,OAAO,OAAO,OAAO,MAAM;GAAE;;CAGrE,AAAQ,qBACN,OACA,cACqB;AACrB,MAAI,CAAC,KAAK,0BAA2B,QAAO;AAC5C,MAAI,CAAC,MAAM,OAAO,OAAQ,QAAO;AACjC,SAAO,aAAa;;CAGtB,MAAgB,eAAe,OAA8D;EAC3F,MAAM,WAAW,MAAM,0BAA0B,MAAM,SAAS;AAEhE,MAAI,MAAM,gBAAgB,SAAS,eACjC;OACE,CAAC,KAAK,mCACL,CAAC,KAAK,kCAAkC,MAAM,OAAO,OAEtD,UAAS,QAAQ;IACf,MAAM;IACN,SAAS,oBAAoB,MAAM,eAAe,WAAW,OAAO;IACrE,CAAC;;AAGN,SAAO;;CAGT,MAAc,qBAAqB,OAGhC;AACD,MAAI,CAAC,KAAK,kCAAkC,MAAM,OAAO,OACvD,QAAO;GAAE,UAAU;GAAO,gBAAgB;GAAW;AAEvD,MAAI,CAAC,KAAK,iCAAiC;GACzC,MAAM,WAAW,MAAM,gBAAgB,SAAS;AAChD,UAAO;IACL;IACA,gBAAgB,WAAW,EAAE,MAAM,eAAe,GAAG;IACtD;;AAGH,MAAI,MAAM,gBAAgB,SAAS,cACjC,QAAO;GACL,UAAU;GACV,gBAAgB;IACd,MAAM;IACN,aAAa;KACX,GAAG,MAAM,eAAe;KACxB,QAAQ,KAAK,6BAA6B,MAAM,eAAe,WAAW,OAAO;KAClF;IACF;GACF;AAGH,SAAO;GAAE,UAAU;GAAO,gBAAgB;GAAW;;CAGvD,MAAc,wBACZ,MACA,gBAC0B;AAC1B,MAAI,gBAAgB,SAAS,cAC3B,OAAM,IAAI,MAAM,uCAAuC;EAGzD,MAAM,EAAE,UAAU,gBAAgB,2BAA2B,MAAM,KAAK,qBAAqB,EAC3F,gBACD,CAAC;EACF,MAAM,MAAO,MAAM,KAAK,OAAO,KAAK,YAAY,OAAO;GACrD,GAAG;GACH,iBAAiB;GAClB,CAAC;AAEF,SAAO,KAAK,wBAAwB,MAAM,KAAK,SAAS;;CAe1D,MAAc,wBACZ,MACA,QACA,UACA,WACgF;EAChF,MAAM,SAAS,IAAI,eAAoD,EACrE,OAAO,OAAO,eAAe;AAC3B,OAAI;AACF,eAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,cAAc,EACZ,iBAAiB,KAAK,kBACvB,EACF,EACF,EACF,CAAC;IAEF,IAAI,OAAO;IACX,IAAI,UAAU;IACd,MAAM,YAEC,EAAE;IACT,IAAI;AAEJ,eAAW,MAAM,SAAS,QAAQ;KAEhC,MAAM,SADS,MAAM,UAAU,KACT;AAEtB,SAAI,CAAC,OAAO;AACV,cAAQ,MAAM;AACd,iBAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,OACD,EACF,EACF,CAAC;;AAGJ,SAAI,OAAO,YAAY,OACrB,MAAK,MAAM,QAAQ,MAAM,WACvB,KAAI,KAAK,yBAAyB,KAAK,UAAU,OAC/C,qBAAoB,WAAW,KAAK;SAEpC,wBAAuB,WAAW,KAAK;AAK7C,SAAI,SAAS,eAAe,SAAS,OAAO,MAAM,cAAc,SAC9D,YAAW,QAAQ,EAAE,OAAO,EAAE,MAAM,EAAE,UAAU,MAAM,WAAW,EAAE,EAAE,CAAC;AAGxE,SAAI,OAAO,SAAS;AAClB,cAAQ,MAAM;AACd,UAAI,CAAC,SACH,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,MAAM,MAAM,SACb,EACF,EACF,CAAC;;AAIN,SAAI,OAAO,QACT,YAAW,MAAM;AAGnB,SAAI,MAAM,OAAO;MACf,MAAM,QAA8B;OAClC,aAAa,MAAM,MAAM;OACzB,cAAc,MAAM,MAAM;OAC3B;MAGD,MAAM,eAAe,MAAM,MAAM;AACjC,UAAI,cAAc,cAChB,OAAM,uBAAuB,aAAa;AAG5C,iBAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,OACD,EACF,EACF,CAAC;;;AAIN,QAAI,YAAY,KACd,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,MAAM,cAAc,KAAK,EAC1B,EACF,EACF,CAAC;AAGJ,QAAI,UAAU,OACZ,YAAW,QAAQ,EACjB,OAAO,EACL,MAAM,EACJ,WAAW,UAAU,KAAK,EAAE,MAAM,GAAG,SAAS;KAC5C,GAAG;KACH,UAAU;MAAE,GAAG,EAAE;MAAU,WAAW,OAAO,cAAc,KAAK,GAAG,EAAE;MAAE;KACxE,EAAE,EACJ,EACF,EACF,CAAC;AAGJ,QAAI,QACF,YAAW,sBAAM,IAAI,MAAM,yBAAyB,UAAU,CAAC;AAGjE,eAAW,OAAO;YACX,OAAO;AACd,eAAW,MAAM,MAAM;;KAG5B,CAAC;AAEF,SAAO,YAAY,SAAS,MAAM,4BAA4B,OAAO;;;;;;;CAQvE,AAAU,oBAA2C;CAErD,AAAU,6BAA6B,QAA0D;AAC/F,MAAI,QAAQ,SAAS,UAAU;GAC7B,MAAM,IAAI;GAKV,MAAM,WAAW,KAAK,sBAAsB,UAAU,OAAO,KAAK,EAAE,WAAW,GAAG,EAAE;AAEpF,UAAO;IACL,GAAG;IACH,YAAY,OAAO,YACjB,OAAO,QAAQ,EAAE,WAAW,CAAC,KAAK,CAAC,KAAK,WAAW;KACjD,MAAM,cAAc,KAAK,6BAA6B,MAAiC;AAGvF,YAAO,CACL,KACA,KAAK,sBAAsB,cAAc,EAAE,UAAU,SAAS,IAAI,GAC9D,cACA,EAAE,OAAO,CAAC,aAAa,EAAE,MAAM,CAAC,OAAO,EAAE,CAAC,EAAE,CACjD;
MACD,CACH;IACD;IACD;;AAGH,MAAI,QAAQ,SAAS,SAAS;GAC5B,MAAM,EAAE,UAAU;AAElB,UAAO;IACL,GAAG;IACH,OAAO,KAAK,6BAA6B,MAAM;IAChD;;AAGH,SAAO;;;AAKX,MAAM,UAAU,iBAAiB,kBAAkB;;;;AAKnD,eAAsB,0BACpB,UACuC;AACvC,QAAO,QAAQ,IACb,SAAS,IACP,OAAO,OACJ;EACC,MAAM,QAAQ,EAAE,KAAK;EACrB,SACE,OAAO,EAAE,YAAY,WACjB,EAAE,UACF,EAAE,YAEA,MAAM,QAAQ,IACZ,EAAE,QAAQ,IAAwC,OAAO,MAAM;AAC7D,WAAQ,EAAE,MAAV;IACE,KAAK,OACH,QAAO;KAAE,MAAM;KAAQ,MAAM,EAAE;KAAM;IACvC,KAAK,MACH,QAAO;KAAE,MAAM;KAAa,WAAW,EAAE,KAAK,EAAE,KAAK;KAAE;IACzD,KAAK,OACH,QAAO;KACL,MAAM;KACN,WAAW,EACT,KAAK,QAAQ,EAAE,YAAY,YAAY,UAAU,EAAE,QACpD;KACF;IACH,KAAK,QACH,OAAM,IAAI,MACR,2BAA2B,EAAE,KAAK,iDACnC;;IAGL,CACH,EACD,OAAO,cAAc;EAC7B,YAAY,EAAE,WAAW,KAAK,SAAO;GACnC,GAAGA;GACH,UAAU;IACR,GAAGA,IAAE;IACL,WAAW,KAAK,UAAUA,IAAE,SAAS,UAAU;IAChD;GACF,EAAE;EACH,cAAc,EAAE;EAChB,MAAM,EAAE;EACT,EACJ,CACF;;;;;AAMH,SAAgB,oBACd,OACA,SACkC;AAClC,QAAO,OAAO,SACV,MAAM,KAAK,MAAM;EACf,MAAM,aAAa,EAAE,SAAS;AAC9B,MAAI,SAAS,4BAA4B,OAAO,KAAK,WAAW,CAAC,WAAW,EAC1E,YAAW,OAAO;AAEpB,SAAO;GACL,MAAM;GACN,UAAU;IACR,MAAM,EAAE,SAAS;IACjB,aAAa,EAAE,SAAS;IACxB;IACD;GACF;GACD,GACF;;AAGN,SAAS,oBACP,WAGA,MAGA;AACA,WAAU,KAAK,WAAW;EACxB,IAAI,KAAK,MAAM,IAAI;EACnB,MAAM;EACN,UAAU;GAAE,MAAM;GAAI,WAAW,EAAE;GAAE;EACrC,MAAM;EACP;CACD,MAAM,IAAI,UAAU,KAAK;AACzB,KAAI,CAAC,EAAG,OAAM,IAAI,MAAM,sBAAsB;AAE9C,KAAI,KAAK,KAAM,GAAE,OAAO,KAAK;AAE7B,GAAE,SAAS,OAAO,EAAE,SAAS,QAAQ,KAAK,UAAU,QAAQ;AAC5D,GAAE,OAAO,EAAE,KAAK,OAAO,KAAK,UAAU,aAAa,GAAG;;AAGxD,SAAS,uBACP,WAGA,MACA;AACA,WAAU,KAAK;EACb,IAAI,KAAK,MAAM,IAAI;EACnB,MAAM;EACN,UAAU;GACR,MAAM,KAAK,UAAU,QAAQ;GAC7B,WAAW,cAAc,KAAK,UAAU,aAAa,KAAK;GAC3D;EACD,MAAM,KAAK,UAAU,aAAa;EACnC,CAAC"}
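The chat-model source embedded in the map above prepares JSON schemas for OpenAI structured outputs: with the default `optionalFieldMode` of `"anyOf"`, every object property is listed as required, and properties that were originally optional are made nullable instead. A minimal top-level sketch of that conversion (the function name here is invented, and it skips the recursion into nested object/array schemas that the real `jsonSchemaToOpenAIJsonSchema` performs):

```typescript
// Sketch of the "anyOf" optional-field handling used for OpenAI structured outputs.
// The package's implementation also recurses into nested object and array schemas.
type JsonSchema = Record<string, unknown>;

function toOpenAIObjectSchema(schema: JsonSchema): JsonSchema {
  if (schema.type !== "object") return schema;

  const properties = schema.properties as Record<string, JsonSchema>;
  const required = (schema.required as string[] | undefined) ?? [];

  return {
    ...schema,
    // OpenAI expects every property to appear in `required`...
    required: Object.keys(properties),
    properties: Object.fromEntries(
      Object.entries(properties).map(([key, value]) => [
        key,
        // ...so originally-optional fields are made nullable rather than required outright.
        required.includes(key) ? value : { anyOf: [value, { type: ["null"] }] },
      ]),
    ),
  };
}

// Example: `age` was optional in the input schema, so it becomes required-but-nullable.
toOpenAIObjectSchema({
  type: "object",
  properties: { name: { type: "string" }, age: { type: "number" } },
  required: ["name"],
});
```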
|
package/dist/openai-image-model.cjs
CHANGED
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
|
|
2
2
|
const require_openai = require('./openai.cjs');
|
|
3
|
-
let
|
|
4
|
-
let
|
|
3
|
+
let _aigne_model_base = require("@aigne/model-base");
|
|
4
|
+
let _aigne_model_base_utils_type_utils = require("@aigne/model-base/utils/type-utils");
|
|
5
5
|
let zod = require("zod");
|
|
6
|
-
let
|
|
6
|
+
let _aigne_model_base_utils_camelize = require("@aigne/model-base/utils/camelize");
|
|
7
7
|
|
|
8
8
|
//#region src/openai-image-model.ts
|
|
9
9
|
const DEFAULT_MODEL = "dall-e-2";
|
|
@@ -35,7 +35,7 @@ const SUPPORTED_PARAMS = {
|
|
|
35
35
|
]
|
|
36
36
|
};
|
|
37
37
|
const SUPPORT_EDIT_MODELS = ["gpt-image-1"];
|
|
38
|
-
const openAIImageModelInputSchema =
|
|
38
|
+
const openAIImageModelInputSchema = _aigne_model_base.imageModelInputSchema.extend({});
|
|
39
39
|
const openAIImageModelOptionsSchema = zod.z.object({
|
|
40
40
|
apiKey: zod.z.string().optional(),
|
|
41
41
|
baseURL: zod.z.string().optional(),
|
|
@@ -43,7 +43,7 @@ const openAIImageModelOptionsSchema = zod.z.object({
|
|
|
43
43
|
modelOptions: zod.z.object({}).optional(),
|
|
44
44
|
clientOptions: zod.z.object({}).optional()
|
|
45
45
|
});
|
|
46
|
-
var OpenAIImageModel = class extends
|
|
46
|
+
var OpenAIImageModel = class extends _aigne_model_base.ImageModel {
|
|
47
47
|
constructor(options) {
|
|
48
48
|
super({
|
|
49
49
|
...options,
|
|
@@ -51,7 +51,7 @@ var OpenAIImageModel = class extends _aigne_core.ImageModel {
|
|
|
51
51
|
description: options?.description ?? "Draw or edit image by OpenAI image models"
|
|
52
52
|
});
|
|
53
53
|
this.options = options;
|
|
54
|
-
if (options) (0,
|
|
54
|
+
if (options) (0, _aigne_model_base_utils_type_utils.checkArguments)(this.name, openAIImageModelOptionsSchema, options);
|
|
55
55
|
}
|
|
56
56
|
_client;
|
|
57
57
|
apiKeyEnvName = "OPENAI_API_KEY";
|
|
@@ -85,7 +85,7 @@ var OpenAIImageModel = class extends _aigne_core.ImageModel {
|
|
|
85
85
|
const model = input.modelOptions?.model || this.credential.model;
|
|
86
86
|
if (input.image?.length && !SUPPORT_EDIT_MODELS.includes(model)) throw new Error(`Model ${model} does not support image editing`);
|
|
87
87
|
const body = {
|
|
88
|
-
...(0,
|
|
88
|
+
...(0, _aigne_model_base_utils_camelize.snakelize)((0, _aigne_model_base_utils_type_utils.pick)({
|
|
89
89
|
...this.modelOptions,
|
|
90
90
|
...input.modelOptions,
|
|
91
91
|
...input
|
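In the rewritten request-body construction above, the renamed `@aigne/model-base` helpers first `pick` the supported parameters out of the merged model options and then `snakelize` them into the snake_case names the OpenAI images API expects. A rough stand-in for that behaviour (these helper bodies are illustrative, not the package's actual implementations):

```typescript
// Illustrative stand-ins for @aigne/model-base's pick + snakelize helpers.
function pick(obj: Record<string, unknown>, keys: string[]): Record<string, unknown> {
  return Object.fromEntries(Object.entries(obj).filter(([key]) => keys.includes(key)));
}

function snakelize(obj: Record<string, unknown>): Record<string, unknown> {
  return Object.fromEntries(
    Object.entries(obj).map(([key, value]) => [
      key.replace(/[A-Z]/g, (c) => `_${c.toLowerCase()}`),
      value,
    ]),
  );
}

// Camel-cased model options become snake_case request params; unsupported keys are dropped.
snakelize(pick({ outputFormat: "png", n: 1, unsupportedOption: true }, ["outputFormat", "n"]));
// -> { output_format: "png", n: 1 }
```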
package/dist/openai-image-model.d.cts
CHANGED
|
@@ -1,36 +1,36 @@
|
|
|
1
|
-
import {
|
|
1
|
+
import { ImageModel, ImageModelInput, ImageModelOptions, ImageModelOutput, ModelInvokeOptions } from "@aigne/model-base";
|
|
2
2
|
import OpenAI, { ClientOptions } from "openai";
|
|
3
|
-
import { Camelize } from "@aigne/
|
|
3
|
+
import { Camelize } from "@aigne/model-base/utils/camelize";
|
|
4
4
|
|
|
5
5
|
//#region src/openai-image-model.d.ts
|
|
6
6
|
interface OpenAIImageModelInput extends ImageModelInput, Camelize<Omit<OpenAI.ImageGenerateParams | OpenAI.ImageEditParams, "prompt" | "model" | "n" | "response_format">> {}
|
|
7
7
|
interface OpenAIImageModelOutput extends ImageModelOutput {}
|
|
8
8
|
interface OpenAIImageModelOptions extends ImageModelOptions<OpenAIImageModelInput, OpenAIImageModelOutput> {
|
|
9
9
|
/**
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
10
|
+
* API key for OpenAI API
|
|
11
|
+
*
|
|
12
|
+
* If not provided, will look for OPENAI_API_KEY in environment variables
|
|
13
|
+
*/
|
|
14
14
|
apiKey?: string;
|
|
15
15
|
/**
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
16
|
+
* Base URL for OpenAI API
|
|
17
|
+
*
|
|
18
|
+
* Useful for proxies or alternate endpoints
|
|
19
|
+
*/
|
|
20
20
|
baseURL?: string;
|
|
21
21
|
/**
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
22
|
+
* OpenAI model to use
|
|
23
|
+
*
|
|
24
|
+
* Defaults to 'dall-e-2'
|
|
25
|
+
*/
|
|
26
26
|
model?: string;
|
|
27
27
|
/**
|
|
28
|
-
|
|
29
|
-
|
|
28
|
+
* Additional model options to control behavior
|
|
29
|
+
*/
|
|
30
30
|
modelOptions?: Omit<Partial<OpenAIImageModelInput>, "model">;
|
|
31
31
|
/**
|
|
32
|
-
|
|
33
|
-
|
|
32
|
+
* Client options for OpenAI API
|
|
33
|
+
*/
|
|
34
34
|
clientOptions?: Partial<ClientOptions>;
|
|
35
35
|
}
|
|
36
36
|
declare class OpenAIImageModel extends ImageModel<OpenAIImageModelInput, OpenAIImageModelOutput> {
|
|
@@ -46,11 +46,11 @@ declare class OpenAIImageModel extends ImageModel<OpenAIImageModelInput, OpenAII
|
|
|
46
46
|
};
|
|
47
47
|
get modelOptions(): Omit<Partial<OpenAIImageModelInput>, "model"> | undefined;
|
|
48
48
|
/**
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
process(input: OpenAIImageModelInput, _options:
|
|
49
|
+
* Process the input and generate a response
|
|
50
|
+
* @param input The input to process
|
|
51
|
+
* @returns The generated response
|
|
52
|
+
*/
|
|
53
|
+
process(input: OpenAIImageModelInput, _options: ModelInvokeOptions): Promise<OpenAIImageModelOutput>;
|
|
54
54
|
}
|
|
55
55
|
//#endregion
|
|
56
56
|
export { OpenAIImageModel, OpenAIImageModelInput, OpenAIImageModelOptions, OpenAIImageModelOutput };
|
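The restored doc comments above cover every constructor option for the image model. A minimal construction sketch based on them (the `size` field comes from OpenAI's camelized ImageGenerateParams, and the commented-out invocation is an assumption, since the calling convention is not part of this diff):

```typescript
import { OpenAIImageModel } from "@aigne/openai";

const imageModel = new OpenAIImageModel({
  apiKey: process.env.OPENAI_API_KEY, // optional; falls back to the OPENAI_API_KEY env var
  model: "gpt-image-1", // defaults to "dall-e-2"; only gpt-image-1 supports image editing
  modelOptions: { size: "1024x1024" }, // camelized OpenAI image params, minus "model"
});

// Assumed agent-style call; exact input/output field names are not shown in this diff:
// const result = await imageModel.invoke({ prompt: "A watercolor fox" });
```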
package/dist/openai-image-model.d.cts.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"openai-image-model.d.cts","names":[],"sources":["../src/openai-image-model.ts"],"mappings":";;;;;UAqCiB,qBAAA,SACP,eAAA,EACN,QAAA,CACE,IAAA,CACE,MAAA,CAAO,mBAAA,GAAsB,MAAA,CAAO,eAAA;AAAA,UAK3B,sBAAA,SAA+B,gBAAA;AAAA,UAE/B,uBAAA,SACP,iBAAA,CAAkB,qBAAA,EAAuB,sBAAA;
|
|
1
|
+
{"version":3,"file":"openai-image-model.d.cts","names":[],"sources":["../src/openai-image-model.ts"],"mappings":";;;;;UAqCiB,qBAAA,SACP,eAAA,EACN,QAAA,CACE,IAAA,CACE,MAAA,CAAO,mBAAA,GAAsB,MAAA,CAAO,eAAA;AAAA,UAK3B,sBAAA,SAA+B,gBAAA;AAAA,UAE/B,uBAAA,SACP,iBAAA,CAAkB,qBAAA,EAAuB,sBAAA;EARd;;;;;EAcnC,MAAA;EAjBQ;;;;;EAwBR,OAAA;EArB0C;;;AAK5C;;EAuBE,KAAA;EAvB8C;;AAEhD;EA0BE,YAAA,GAAe,IAAA,CAAK,OAAA,CAAQ,qBAAA;;;;EAK5B,aAAA,GAAgB,OAAA,CAAQ,aAAA;AAAA;AAAA,cAeb,gBAAA,SAAyB,UAAA,CAAW,qBAAA,EAAuB,sBAAA;EAC1C,OAAA,GAAU,uBAAA;cAAV,OAAA,GAAU,uBAAA;EAAA,UAS5B,OAAA,GAAU,MAAA;EAAA,UAEV,aAAA;EAAA,IAEN,MAAA,CAAA,GAAM,MAAA;EAAA,IAmBG,UAAA,CAAA;;;;;MAQT,YAAA,CAAA,GAAY,IAAA,CAAA,OAAA,CAAA,qBAAA;EA7DhB;;;;;EAsEe,OAAA,CACb,KAAA,EAAO,qBAAA,EACP,QAAA,EAAU,kBAAA,GACT,OAAA,CAAQ,sBAAA;AAAA"}
|