@aigne/openai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +17 -0
- package/LICENSE.md +93 -0
- package/README.md +112 -0
- package/README.zh.md +112 -0
- package/lib/cjs/index.d.ts +1 -0
- package/lib/cjs/index.js +17 -0
- package/lib/cjs/openai-chat-model.d.ts +165 -0
- package/lib/cjs/openai-chat-model.js +415 -0
- package/lib/cjs/package.json +1 -0
- package/lib/dts/index.d.ts +1 -0
- package/lib/dts/openai-chat-model.d.ts +165 -0
- package/lib/esm/index.d.ts +1 -0
- package/lib/esm/index.js +1 -0
- package/lib/esm/openai-chat-model.d.ts +165 -0
- package/lib/esm/openai-chat-model.js +405 -0
- package/lib/esm/package.json +1 -0
- package/package.json +56 -0
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
import { type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput, type Role } from "@aigne/core";
|
|
2
|
+
import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
|
|
3
|
+
import OpenAI from "openai";
|
|
4
|
+
import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
|
|
5
|
+
import { z } from "zod";
|
|
6
|
+
/**
 * Feature flags describing what the configured OpenAI model supports.
 *
 * `OpenAIChatModel` defaults all flags to true and applies per-model
 * overrides (e.g. for "o3-mini"/"o4-mini") at construction time; the
 * flags steer request construction and stream handling.
 */
export interface OpenAIChatModelCapabilities {
    /** Model accepts a native `json_schema` response format. */
    supportsNativeStructuredOutputs: boolean;
    /** Conversation may end with a system message (no trailing user turn needed). */
    supportsEndWithSystemMessage: boolean;
    /** Tool use and JSON-schema output may be combined in a single request. */
    supportsToolsUseWithJsonSchema: boolean;
    /** The `parallel_tool_calls` request parameter is accepted. */
    supportsParallelToolCalls: boolean;
    /** Tools may declare an empty `parameters` object as-is. */
    supportsToolsEmptyParameters: boolean;
    /** Tool calls may arrive incrementally (indexed deltas) in the stream. */
    supportsToolStreaming: boolean;
    /** The `temperature` request parameter is accepted. */
    supportsTemperature: boolean;
}
|
|
15
|
+
/**
 * Configuration options for OpenAI Chat Model
 */
export interface OpenAIChatModelOptions {
    /**
     * API key for OpenAI API
     *
     * If not provided, will look for OPENAI_API_KEY in environment variables
     */
    apiKey?: string;
    /**
     * Base URL for OpenAI API
     *
     * Useful for proxies or alternate endpoints
     */
    baseURL?: string;
    /**
     * OpenAI model to use
     *
     * Defaults to 'gpt-4o-mini'
     */
    model?: string;
    /**
     * Additional model options to control behavior
     * (temperature, topP, frequency/presence penalties, parallelToolCalls)
     */
    modelOptions?: ChatModelOptions;
}
|
|
42
|
+
/**
 * Compiler-inferred type of the runtime zod schema that validates
 * {@link OpenAIChatModelOptions}. Note that after parsing,
 * `modelOptions.parallelToolCalls` is always present (defaults to true).
 *
 * @hidden
 */
export declare const openAIChatModelOptionsSchema: z.ZodObject<{
    apiKey: z.ZodOptional<z.ZodString>;
    baseURL: z.ZodOptional<z.ZodString>;
    model: z.ZodOptional<z.ZodString>;
    modelOptions: z.ZodOptional<z.ZodObject<{
        model: z.ZodOptional<z.ZodString>;
        temperature: z.ZodOptional<z.ZodNumber>;
        topP: z.ZodOptional<z.ZodNumber>;
        frequencyPenalty: z.ZodOptional<z.ZodNumber>;
        presencePenalty: z.ZodOptional<z.ZodNumber>;
        parallelToolCalls: z.ZodDefault<z.ZodOptional<z.ZodBoolean>>;
    }, "strip", z.ZodTypeAny, {
        parallelToolCalls: boolean;
        model?: string | undefined;
        temperature?: number | undefined;
        topP?: number | undefined;
        frequencyPenalty?: number | undefined;
        presencePenalty?: number | undefined;
    }, {
        model?: string | undefined;
        temperature?: number | undefined;
        topP?: number | undefined;
        frequencyPenalty?: number | undefined;
        presencePenalty?: number | undefined;
        parallelToolCalls?: boolean | undefined;
    }>>;
}, "strip", z.ZodTypeAny, {
    apiKey?: string | undefined;
    baseURL?: string | undefined;
    model?: string | undefined;
    modelOptions?: {
        parallelToolCalls: boolean;
        model?: string | undefined;
        temperature?: number | undefined;
        topP?: number | undefined;
        frequencyPenalty?: number | undefined;
        presencePenalty?: number | undefined;
    } | undefined;
}, {
    apiKey?: string | undefined;
    baseURL?: string | undefined;
    model?: string | undefined;
    modelOptions?: {
        model?: string | undefined;
        temperature?: number | undefined;
        topP?: number | undefined;
        frequencyPenalty?: number | undefined;
        presencePenalty?: number | undefined;
        parallelToolCalls?: boolean | undefined;
    } | undefined;
}>;
|
|
96
|
+
/**
 * Implementation of the ChatModel interface for OpenAI's API
 *
 * This model provides access to OpenAI's capabilities including:
 * - Text generation
 * - Tool use with parallel tool calls
 * - JSON structured output
 * - Image understanding
 *
 * Default model: 'gpt-4o-mini'
 *
 * @example
 * Here's how to create and use an OpenAI chat model:
 * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}
 *
 * @example
 * Here's an example with streaming response:
 * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-streaming}
 */
export declare class OpenAIChatModel extends ChatModel {
    /** Options this instance was constructed with. */
    options?: OpenAIChatModelOptions | undefined;
    constructor(options?: OpenAIChatModelOptions | undefined);
    /**
     * Cached OpenAI SDK client; created lazily by the `client` getter.
     *
     * @hidden
     */
    protected _client?: OpenAI;
    /** Environment variable consulted when `options.apiKey` is absent. */
    protected apiKeyEnvName: string;
    /** Last-resort API key used when neither options nor the environment provide one. */
    protected apiKeyDefault: string | undefined;
    /* Capability defaults; overridden per model at construction time. */
    protected supportsNativeStructuredOutputs: boolean;
    protected supportsEndWithSystemMessage: boolean;
    protected supportsToolsUseWithJsonSchema: boolean;
    protected supportsParallelToolCalls: boolean;
    protected supportsToolsEmptyParameters: boolean;
    protected supportsToolStreaming: boolean;
    protected supportsTemperature: boolean;
    /** OpenAI SDK client; created on first access, throws when no API key resolves. */
    get client(): OpenAI;
    /** Model options supplied via the constructor, if any. */
    get modelOptions(): ChatModelOptions | undefined;
    /**
     * Process the input and generate a response
     * @param input The input to process
     * @returns The generated response
     */
    process(input: ChatModelInput): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
    private _process;
    private getParallelToolCalls;
    private getRunMessages;
    private getRunResponseFormat;
    private requestStructuredOutput;
    private extractResultFromStream;
}
|
|
146
|
+
/**
 * Mapping from AIGNE message roles to OpenAI chat-completion roles;
 * only "agent" differs (it maps to OpenAI's "assistant").
 *
 * @hidden
 */
export declare const ROLE_MAP: {
    [key in Role]: ChatCompletionMessageParam["role"];
};
|
|
152
|
+
/**
 * Convert AIGNE chat messages into OpenAI `ChatCompletionMessageParam`s.
 *
 * @hidden
 */
export declare function contentsFromInputMessages(messages: ChatModelInputMessage[]): Promise<ChatCompletionMessageParam[]>;
|
|
156
|
+
/**
 * Convert AIGNE tool definitions into OpenAI `ChatCompletionTool`s;
 * returns `undefined` when no tools are provided.
 *
 * @hidden
 */
export declare function toolsFromInputTools(tools?: ChatModelInputTool[], options?: {
    /** When true, tag an empty `parameters` object with `type: "object"`. */
    addTypeToEmptyParameters?: boolean;
}): ChatCompletionTool[] | undefined;
|
|
162
|
+
/**
 * Rewrite a JSON schema to satisfy OpenAI structured-output constraints:
 * every property is listed in `required`, with originally-optional
 * properties wrapped as nullable (`anyOf` with `{ type: ["null"] }`).
 *
 * @hidden
 */
export declare function jsonSchemaToOpenAIJsonSchema(schema: Record<string, unknown>): Record<string, unknown>;
|
|
@@ -0,0 +1,405 @@
|
|
|
1
|
+
import { ChatModel, } from "@aigne/core";
|
|
2
|
+
import { parseJSON } from "@aigne/core/utils/json-schema.js";
|
|
3
|
+
import { mergeUsage } from "@aigne/core/utils/model-utils.js";
|
|
4
|
+
import { getJsonOutputPrompt } from "@aigne/core/utils/prompts.js";
|
|
5
|
+
import { agentResponseStreamToObject } from "@aigne/core/utils/stream-utils.js";
|
|
6
|
+
import { checkArguments, isNonNullable, } from "@aigne/core/utils/type-utils.js";
|
|
7
|
+
import { nanoid } from "nanoid";
|
|
8
|
+
import OpenAI from "openai";
|
|
9
|
+
import { z } from "zod";
|
|
10
|
+
// Model used when the caller does not specify one.
const CHAT_MODEL_OPENAI_DEFAULT_MODEL = "gpt-4o-mini";
// Per-model capability overrides applied on top of the class defaults in
// the constructor. NOTE(review): presumably the reasoning models below
// reject `temperature` and `parallel_tool_calls` — confirm against the
// OpenAI API reference when adding new entries.
const OPENAI_CHAT_MODEL_CAPABILITIES = {
    "o4-mini": { supportsParallelToolCalls: false, supportsTemperature: false },
    "o3-mini": { supportsParallelToolCalls: false, supportsTemperature: false },
};
|
|
15
|
+
/**
 * Zod schema used by the constructor to validate caller-supplied
 * `OpenAIChatModelOptions` before any request is made.
 *
 * @hidden
 */
export const openAIChatModelOptionsSchema = z.object({
    apiKey: z.string().optional(),
    baseURL: z.string().optional(),
    model: z.string().optional(),
    modelOptions: z
        .object({
        model: z.string().optional(),
        temperature: z.number().optional(),
        topP: z.number().optional(),
        frequencyPenalty: z.number().optional(),
        presencePenalty: z.number().optional(),
        // Defaults to true after parsing when omitted.
        parallelToolCalls: z.boolean().optional().default(true),
    })
        .optional(),
});
|
|
33
|
+
/**
 * Implementation of the ChatModel interface for OpenAI's API
 *
 * This model provides access to OpenAI's capabilities including:
 * - Text generation
 * - Tool use with parallel tool calls
 * - JSON structured output
 * - Image understanding
 *
 * Default model: 'gpt-4o-mini'
 *
 * @example
 * Here's how to create and use an OpenAI chat model:
 * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model}
 *
 * @example
 * Here's an example with streaming response:
 * {@includeCode ../test/openai-chat-model.test.ts#example-openai-chat-model-streaming}
 */
export class OpenAIChatModel extends ChatModel {
    // Raw constructor options; also surfaced via the `modelOptions` getter.
    options;
    constructor(options) {
        super();
        this.options = options;
        // Validate caller-supplied options against the zod schema.
        if (options)
            checkArguments(this.name, openAIChatModelOptionsSchema, options);
        // Apply per-model capability overrides (e.g. "o3-mini"/"o4-mini").
        // Instance fields are already initialized when the constructor body
        // runs, so the preset wins over the defaults declared below;
        // Object.assign ignores an undefined source, so no guard is needed.
        const preset = options?.model ? OPENAI_CHAT_MODEL_CAPABILITIES[options.model] : undefined;
        Object.assign(this, preset);
    }
    /**
     * Cached OpenAI SDK client; created lazily by the `client` getter.
     *
     * @hidden
     */
    _client;
    // Environment variable consulted when options.apiKey is absent.
    apiKeyEnvName = "OPENAI_API_KEY";
    // Last-resort API key; unset here — presumably for subclasses/presets
    // to fill in (TODO confirm).
    apiKeyDefault;
    // Capability defaults; may be overridden per model in the constructor.
    supportsNativeStructuredOutputs = true;
    supportsEndWithSystemMessage = true;
    supportsToolsUseWithJsonSchema = true;
    supportsParallelToolCalls = true;
    supportsToolsEmptyParameters = true;
    supportsToolStreaming = true;
    supportsTemperature = true;
    // Lazily construct (and cache) the OpenAI SDK client. Throws when no
    // API key resolves from options, the environment, or the default.
    get client() {
        const apiKey = this.options?.apiKey || process.env[this.apiKeyEnvName] || this.apiKeyDefault;
        if (!apiKey)
            throw new Error(`Api Key is required for ${this.name}`);
        this._client ??= new OpenAI({
            baseURL: this.options?.baseURL,
            apiKey,
        });
        return this._client;
    }
    // Model options supplied at construction time, if any.
    get modelOptions() {
        return this.options?.modelOptions;
    }
    /**
     * Process the input and generate a response
     * @param input The input to process
     * @returns The generated response
     */
    process(input) {
        return this._process(input);
    }
    // Build the request, call the streaming completions API, and adapt the
    // response. Per-call input.modelOptions take precedence over instance
    // modelOptions throughout (?? fallback chain).
    async _process(input) {
        const messages = await this.getRunMessages(input);
        const body = {
            model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,
            // Omit temperature entirely for models that reject it.
            temperature: this.supportsTemperature
                ? (input.modelOptions?.temperature ?? this.modelOptions?.temperature)
                : undefined,
            top_p: input.modelOptions?.topP ?? this.modelOptions?.topP,
            frequency_penalty: input.modelOptions?.frequencyPenalty ?? this.modelOptions?.frequencyPenalty,
            presence_penalty: input.modelOptions?.presencePenalty ?? this.modelOptions?.presencePenalty,
            messages,
            stream_options: {
                include_usage: true,
            },
            stream: true,
        };
        const { jsonMode, responseFormat } = await this.getRunResponseFormat(input);
        const stream = await this.client.chat.completions.create({
            ...body,
            tools: toolsFromInputTools(input.tools, {
                addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,
            }),
            tool_choice: input.toolChoice,
            parallel_tool_calls: this.getParallelToolCalls(input),
            response_format: responseFormat,
        });
        // Plain-text requests are returned as a live stream; structured
        // (json_schema) requests are drained into a single object below.
        if (input.responseFormat?.type !== "json_schema") {
            return await this.extractResultFromStream(stream, false, true);
        }
        const result = await this.extractResultFromStream(stream, jsonMode);
        // Fallback for models that cannot combine tool use with JSON-schema
        // output: issue a second request for the structured result and merge
        // the token usage of both rounds.
        if (!this.supportsToolsUseWithJsonSchema &&
            !result.toolCalls?.length &&
            input.responseFormat?.type === "json_schema" &&
            result.text) {
            const output = await this.requestStructuredOutput(body, input.responseFormat);
            return { ...output, usage: mergeUsage(result.usage, output.usage) };
        }
        return result;
    }
    // Resolve the parallel_tool_calls flag; undefined keeps the parameter
    // out of the request (unsupported model, or no tools in play).
    getParallelToolCalls(input) {
        if (!this.supportsParallelToolCalls)
            return undefined;
        if (!input.tools?.length)
            return undefined;
        return input.modelOptions?.parallelToolCalls ?? this.modelOptions?.parallelToolCalls;
    }
    // Convert input messages and patch the conversation for model quirks:
    // append an empty user turn when the model cannot end on a system
    // message, and inject a JSON-output system prompt when native
    // structured outputs are unavailable.
    async getRunMessages(input) {
        const messages = await contentsFromInputMessages(input.messages);
        if (!this.supportsEndWithSystemMessage && messages.at(-1)?.role !== "user") {
            messages.push({ role: "user", content: "" });
        }
        // With tools on a model that cannot combine them with JSON output,
        // skip the prompt injection (structured output comes from a second
        // request — see _process).
        if (!this.supportsToolsUseWithJsonSchema && input.tools?.length)
            return messages;
        if (this.supportsNativeStructuredOutputs)
            return messages;
        if (input.responseFormat?.type === "json_schema") {
            messages.unshift({
                role: "system",
                content: getJsonOutputPrompt(input.responseFormat.jsonSchema.schema),
            });
        }
        return messages;
    }
    // Decide the response_format to send and whether the stream should be
    // buffered and parsed as JSON (jsonMode). Falls back to plain
    // json_object mode when native json_schema support is missing.
    async getRunResponseFormat(input) {
        if (!this.supportsToolsUseWithJsonSchema && input.tools?.length)
            return { jsonMode: false, responseFormat: undefined };
        if (!this.supportsNativeStructuredOutputs) {
            const jsonMode = input.responseFormat?.type === "json_schema";
            return { jsonMode, responseFormat: jsonMode ? { type: "json_object" } : undefined };
        }
        if (input.responseFormat?.type === "json_schema") {
            return {
                jsonMode: true,
                responseFormat: {
                    type: "json_schema",
                    json_schema: {
                        ...input.responseFormat.jsonSchema,
                        // Rewrite the schema to satisfy OpenAI structured-output
                        // constraints (all fields required, nullable wrappers).
                        schema: jsonSchemaToOpenAIJsonSchema(input.responseFormat.jsonSchema.schema),
                    },
                },
            };
        }
        return { jsonMode: false, responseFormat: undefined };
    }
    // Second-round request used when tool use and JSON-schema output
    // cannot be combined: re-send the same body, this time asking only
    // for the structured result.
    async requestStructuredOutput(body, responseFormat) {
        if (responseFormat?.type !== "json_schema") {
            throw new Error("Expected json_schema response format");
        }
        const { jsonMode, responseFormat: resolvedResponseFormat } = await this.getRunResponseFormat({
            responseFormat,
        });
        const res = await this.client.chat.completions.create({
            ...body,
            response_format: resolvedResponseFormat,
        });
        return this.extractResultFromStream(res, jsonMode);
    }
    // Adapt an OpenAI chunk stream into an AIGNE delta stream. When
    // `streaming` is falsy the stream is drained into a single output
    // object via agentResponseStreamToObject.
    async extractResultFromStream(stream, jsonMode, streaming) {
        const result = new ReadableStream({
            start: async (controller) => {
                try {
                    let text = "";
                    let refusal = "";
                    const toolCalls = [];
                    let model;
                    for await (const chunk of stream) {
                        const choice = chunk.choices?.[0];
                        // Emit the resolved model name once, from the first chunk.
                        if (!model) {
                            model = chunk.model;
                            controller.enqueue({
                                delta: {
                                    json: {
                                        model,
                                    },
                                },
                            });
                        }
                        if (choice?.delta.tool_calls?.length) {
                            for (const call of choice.delta.tool_calls) {
                                // Indexed fragments are accumulated; complete calls
                                // (no index, or tool streaming unsupported) are
                                // appended whole.
                                if (this.supportsToolStreaming && call.index !== undefined) {
                                    handleToolCallDelta(toolCalls, call);
                                }
                                else {
                                    handleCompleteToolCall(toolCalls, call);
                                }
                            }
                        }
                        if (choice?.delta.content) {
                            text += choice.delta.content;
                            // In jsonMode the text is buffered and parsed at the
                            // end instead of being forwarded incrementally.
                            if (!jsonMode) {
                                controller.enqueue({
                                    delta: {
                                        text: {
                                            text: choice.delta.content,
                                        },
                                    },
                                });
                            }
                        }
                        if (choice?.delta.refusal) {
                            refusal += choice.delta.refusal;
                            if (!jsonMode) {
                                controller.enqueue({
                                    delta: {
                                        text: { text: choice.delta.refusal },
                                    },
                                });
                            }
                        }
                        if (chunk.usage) {
                            controller.enqueue({
                                delta: {
                                    json: {
                                        usage: {
                                            inputTokens: chunk.usage.prompt_tokens,
                                            outputTokens: chunk.usage.completion_tokens,
                                        },
                                    },
                                },
                            });
                        }
                    }
                    // A refusal stands in for content when no content arrived.
                    text = text || refusal;
                    if (jsonMode && text) {
                        controller.enqueue({
                            delta: {
                                json: {
                                    json: parseJSON(text),
                                },
                            },
                        });
                    }
                    if (toolCalls.length) {
                        controller.enqueue({
                            delta: {
                                json: {
                                    // Replace the raw accumulated `args` string with its
                                    // parsed form. NOTE(review): for a complete tool call
                                    // with an empty arguments string, `args` is "" here —
                                    // verify parseJSON("") is handled upstream.
                                    toolCalls: toolCalls.map(({ args, ...c }) => ({
                                        ...c,
                                        function: { ...c.function, arguments: parseJSON(args) },
                                    })),
                                },
                            },
                        });
                    }
                    controller.close();
                }
                catch (error) {
                    controller.error(error);
                }
            },
        });
        return streaming ? result : await agentResponseStreamToObject(result);
    }
}
|
|
290
|
+
/**
 * Mapping from AIGNE message roles to OpenAI chat-completion roles;
 * only "agent" differs (it maps to OpenAI's "assistant").
 *
 * @hidden
 */
export const ROLE_MAP = {
    system: "system",
    user: "user",
    agent: "assistant",
    tool: "tool",
};
|
|
299
|
+
/**
 * Translate AIGNE `ChatModelInputMessage`s into the message objects the
 * OpenAI chat-completions endpoint expects. String content passes through
 * unchanged; structured content parts are converted (text and image_url),
 * with unknown part types dropped. Tool-call arguments are serialized to
 * JSON strings.
 *
 * @hidden
 */
export async function contentsFromInputMessages(messages) {
    // Convert a single content part; returns undefined for unknown types.
    const convertPart = (part) => {
        if (part.type === "text") {
            return { type: "text", text: part.text };
        }
        if (part.type === "image_url") {
            return {
                type: "image_url",
                image_url: { url: part.url },
            };
        }
        return undefined;
    };
    return messages.map((message) => {
        const { content, toolCalls, toolCallId, name } = message;
        return {
            role: ROLE_MAP[message.role],
            content: typeof content === "string"
                ? content
                : content?.map(convertPart).filter(isNonNullable),
            tool_calls: toolCalls?.map((call) => ({
                ...call,
                function: {
                    ...call.function,
                    arguments: JSON.stringify(call.function.arguments),
                },
            })),
            tool_call_id: toolCallId,
            name,
        };
    });
}
|
|
331
|
+
/**
 * Translate AIGNE tool definitions into OpenAI `ChatCompletionTool`
 * objects. Returns `undefined` when no tools are supplied so the request
 * omits the `tools` field entirely.
 *
 * @hidden
 */
export function toolsFromInputTools(tools, options) {
    if (!tools?.length)
        return undefined;
    return tools.map(({ function: fn }) => {
        const parameters = fn.parameters;
        // Some models reject a completely empty parameters object, so
        // optionally tag it as an object schema (mutates in place).
        if (options?.addTypeToEmptyParameters && Object.keys(parameters).length === 0) {
            parameters.type = "object";
        }
        return {
            type: "function",
            function: {
                name: fn.name,
                description: fn.description,
                parameters,
            },
        };
    });
}
|
|
352
|
+
/**
 * Rewrite a JSON schema to satisfy OpenAI structured-output constraints:
 * every property must appear in `required`, so originally-optional
 * properties are wrapped as nullable (`anyOf` with `{ type: ["null"] }`).
 * Recurses into object properties and array items.
 *
 * Fix: an object schema may legally omit `properties` (and an array
 * schema may omit `items`); previously that crashed on
 * `Object.entries(undefined)` / emitted `items: undefined`.
 *
 * @hidden
 */
export function jsonSchemaToOpenAIJsonSchema(schema) {
    if (schema?.type === "object") {
        const { required, properties } = schema;
        // `properties` is optional in JSON Schema; treat absence as empty.
        const props = properties ?? {};
        return {
            ...schema,
            properties: Object.fromEntries(Object.entries(props).map(([key, value]) => {
                const valueSchema = jsonSchemaToOpenAIJsonSchema(value);
                // NOTE: All fields must be required https://platform.openai.com/docs/guides/structured-outputs/all-fields-must-be-required
                return [
                    key,
                    required?.includes(key) ? valueSchema : { anyOf: [valueSchema, { type: ["null"] }] },
                ];
            })),
            required: Object.keys(props),
        };
    }
    if (schema?.type === "array" && schema.items) {
        return {
            ...schema,
            items: jsonSchemaToOpenAIJsonSchema(schema.items),
        };
    }
    // Scalars / unrecognized shapes pass through untouched.
    return schema;
}
|
|
380
|
+
// Fold one streamed tool-call fragment into `toolCalls` at the fragment's
// index. The entry is created on first sight (with a generated id when the
// API supplies none); subsequent fragments append to the function name and
// to the raw `args` JSON string, which is parsed once the stream ends.
function handleToolCallDelta(toolCalls, call) {
    let entry = toolCalls[call.index];
    if (!entry) {
        entry = {
            id: call.id || nanoid(),
            type: "function",
            function: { name: "", arguments: {} },
            args: "",
        };
        toolCalls[call.index] = entry;
    }
    if (call.type)
        entry.type = call.type;
    entry.function.name += call.function?.name || "";
    entry.args += call.function?.arguments || "";
}
|
|
395
|
+
// Append a fully-formed (non-streamed) tool call to `toolCalls`. The raw
// arguments string is kept in `args` (mirroring the streamed path) while
// `function.arguments` holds the parsed form, defaulting to an empty
// object when the API sent no arguments.
function handleCompleteToolCall(toolCalls, call) {
    const rawArgs = call.function?.arguments || "";
    toolCalls.push({
        id: call.id || nanoid(),
        type: "function",
        function: {
            name: call.function?.name || "",
            arguments: parseJSON(rawArgs || "{}"),
        },
        args: rawArgs,
    });
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"type": "module"}
|
package/package.json
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@aigne/openai",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "AIGNE OpenAI SDK for integrating with OpenAI's GPT models and API services",
|
|
5
|
+
"publishConfig": {
|
|
6
|
+
"access": "public"
|
|
7
|
+
},
|
|
8
|
+
"author": "Arcblock <blocklet@arcblock.io> https://github.com/blocklet",
|
|
9
|
+
"homepage": "https://github.com/AIGNE-io/aigne-framework",
|
|
10
|
+
"license": "Elastic-2.0",
|
|
11
|
+
"repository": {
|
|
12
|
+
"type": "git",
|
|
13
|
+
"url": "git+https://github.com/AIGNE-io/aigne-framework"
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"lib/cjs",
|
|
17
|
+
"lib/dts",
|
|
18
|
+
"lib/esm",
|
|
19
|
+
"LICENSE",
|
|
20
|
+
"README.md",
|
|
21
|
+
"CHANGELOG.md"
|
|
22
|
+
],
|
|
23
|
+
"type": "module",
|
|
24
|
+
"main": "./lib/cjs/index.js",
|
|
25
|
+
"module": "./lib/esm/index.js",
|
|
26
|
+
"types": "./lib/dts/index.d.ts",
|
|
27
|
+
"exports": {
|
|
28
|
+
".": {
|
|
29
|
+
"import": "./lib/esm/index.js",
|
|
30
|
+
"require": "./lib/cjs/index.js",
|
|
31
|
+
"types": "./lib/dts/index.d.ts"
|
|
32
|
+
}
|
|
33
|
+
},
|
|
34
|
+
"dependencies": {
|
|
35
|
+
"nanoid": "^5.1.5",
|
|
36
|
+
"openai": "^4.87.3",
|
|
37
|
+
"zod": "^3.24.4",
|
|
38
|
+
"@aigne/core": "^1.16.0"
|
|
39
|
+
},
|
|
40
|
+
"devDependencies": {
|
|
41
|
+
"@types/bun": "^1.2.12",
|
|
42
|
+
"@types/node": "^22.15.15",
|
|
43
|
+
"npm-run-all": "^4.1.5",
|
|
44
|
+
"rimraf": "^6.0.1",
|
|
45
|
+
"typescript": "^5.8.3",
|
|
46
|
+
"@aigne/test-utils": "^0.3.0"
|
|
47
|
+
},
|
|
48
|
+
"scripts": {
|
|
49
|
+
"lint": "tsc --noEmit",
|
|
50
|
+
"build": "tsc --build scripts/tsconfig.build.json",
|
|
51
|
+
"clean": "rimraf lib test/coverage",
|
|
52
|
+
"test": "bun test",
|
|
53
|
+
"test:coverage": "bun test --coverage --coverage-reporter=lcov --coverage-reporter=text",
|
|
54
|
+
"postbuild": "echo '{\"type\": \"module\"}' > lib/esm/package.json && echo '{\"type\": \"commonjs\"}' > lib/cjs/package.json"
|
|
55
|
+
}
|
|
56
|
+
}
|