@aigne/openai 0.16.5-beta.2 → 0.16.5-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,37 @@
 # Changelog
 
+## [0.16.5-beta.4](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.5-beta.3...openai-v0.16.5-beta.4) (2025-11-06)
+
+
+### Features
+
+* add dynamic model options resolution with getter pattern ([#708](https://github.com/AIGNE-io/aigne-framework/issues/708)) ([5ed5085](https://github.com/AIGNE-io/aigne-framework/commit/5ed5085203763c70194853c56edc13acf56d81c6))
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.66.0-beta.3
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.58-beta.4
+
+## [0.16.5-beta.3](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.5-beta.2...openai-v0.16.5-beta.3) (2025-11-05)
+
+
+### Bug Fixes
+
+* add taskTitle to observability traces and fix GPT-5/o1 model parameters ([#700](https://github.com/AIGNE-io/aigne-framework/issues/700)) ([30b513b](https://github.com/AIGNE-io/aigne-framework/commit/30b513b46ab5edb58a37f29e566e311bbb389f44))
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.65.1-beta.3
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.58-beta.3
+
 ## [0.16.5-beta.2](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.5-beta.1...openai-v0.16.5-beta.2) (2025-11-04)
 
 
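The beta.4 feature above — "add dynamic model options resolution with getter pattern" — is what the dist-file hunks below implement: the static `get modelOptions()` accessor is removed and the compiled code instead calls `this.getModelOptions(input, options)` when a request is built. That helper comes from @aigne/core and is not shown in this diff, so the sketch below only illustrates the general idea; every name in it (ModelOptions, ModelOptionsSource, resolveModelOptions) is hypothetical.

```ts
// Hypothetical sketch of per-invocation model option resolution.
// The real helper is getModelOptions() in @aigne/core, not shown in this diff.
type ModelOptions = {
  model?: string;
  temperature?: number;
  topP?: number;
  frequencyPenalty?: number;
  presencePenalty?: number;
  parallelToolCalls?: boolean;
  reasoningEffort?: string | number;
};

type ModelOptionsSource =
  | ModelOptions
  | (() => ModelOptions | Promise<ModelOptions>);

async function resolveModelOptions(
  configured: ModelOptionsSource | undefined,
  perRequest: ModelOptions | undefined,
): Promise<ModelOptions> {
  // Evaluate the configured source lazily so a getter can return fresh values
  // on every call, then let per-request options take precedence.
  const base =
    typeof configured === "function" ? await configured() : configured;
  return { ...base, ...perRequest };
}
```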
@@ -1,4 +1,4 @@
-import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type
+import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput } from "@aigne/core";
 import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
 import type { ClientOptions, OpenAI } from "openai";
 import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
@@ -127,13 +127,12 @@ export declare class OpenAIChatModel extends ChatModel {
         apiKey: string | undefined;
         model: string;
     };
-    get modelOptions(): Omit<ChatModelInputOptions, "model"> | undefined;
     /**
      * Process the input and generate a response
      * @param input The input to process
      * @returns The generated response
      */
-    process(input: ChatModelInput,
+    process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
     private getReasoningEffort;
     private _process;
     private getParallelToolCalls;
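The declaration change above makes AgentInvokeOptions a required second parameter of process(). Below is a minimal sketch of a custom ChatModel subclass conforming to the new signature; EchoChatModel is invented, and the sketch assumes process is the only member a subclass must supply and that a plain ChatModelOutput object (with an optional text field) is a valid return value.

```ts
import {
  ChatModel,
  type AgentInvokeOptions,
  type AgentProcessResult,
  type ChatModelInput,
  type ChatModelOutput,
} from "@aigne/core";
import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";

// Made-up subclass that only demonstrates the updated process() contract.
class EchoChatModel extends ChatModel {
  process(
    input: ChatModelInput,
    options: AgentInvokeOptions,
  ): PromiseOrValue<AgentProcessResult<ChatModelOutput>> {
    // A real implementation would resolve model options from `input` and
    // `options` here, as OpenAIChatModel now does via getModelOptions().
    void options;
    return { text: `echo: ${input.messages.length} message(s)` };
  }
}
```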
@@ -94,16 +94,13 @@ class OpenAIChatModel extends core_1.ChatModel {
             model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,
         };
     }
-    get modelOptions() {
-        return this.options?.modelOptions;
-    }
     /**
      * Process the input and generate a response
      * @param input The input to process
      * @returns The generated response
      */
-    process(input,
-        return this._process(input);
+    process(input, options) {
+        return this._process(input, options);
     }
     getReasoningEffort(effort) {
         if (typeof effort === "number") {
@@ -119,24 +116,25 @@ class OpenAIChatModel extends core_1.ChatModel {
         }
         return effort;
     }
-    async _process(input) {
+    async _process(input, options) {
+        const modelOptions = await this.getModelOptions(input, options);
         const messages = await this.getRunMessages(input);
-        const model =
+        const model = modelOptions?.model || this.credential.model;
         const body = {
             model,
-            temperature: this.supportsTemperature
-
-
-
-            frequency_penalty: input.modelOptions?.frequencyPenalty ?? this.modelOptions?.frequencyPenalty,
-            presence_penalty: input.modelOptions?.presencePenalty ?? this.modelOptions?.presencePenalty,
+            temperature: this.supportsTemperature ? modelOptions.temperature : undefined,
+            top_p: modelOptions.topP,
+            frequency_penalty: modelOptions.frequencyPenalty,
+            presence_penalty: modelOptions.presencePenalty,
             messages,
-            stream_options: {
-                include_usage: true,
-            },
+            stream_options: { include_usage: true },
             stream: true,
-            reasoning_effort: this.getReasoningEffort(
+            reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),
         };
+        if (model.includes("gpt-5") || model.includes("o1-")) {
+            delete body.temperature;
+            delete body.top_p;
+        }
         // For models that do not support tools use with JSON schema in same request,
         // so we need to handle the case where tools are not used and responseFormat is json
         if (!input.tools?.length && input.responseFormat?.type === "json_schema") {
@@ -149,7 +147,7 @@ class OpenAIChatModel extends core_1.ChatModel {
                 addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,
             }),
             tool_choice: input.toolChoice,
-            parallel_tool_calls: this.getParallelToolCalls(input),
+            parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),
             response_format: responseFormat,
         }));
         if (input.responseFormat?.type !== "json_schema") {
@@ -174,12 +172,12 @@ class OpenAIChatModel extends core_1.ChatModel {
         const output = await this.requestStructuredOutput(body, input.responseFormat);
         return { ...output, usage: (0, model_utils_js_1.mergeUsage)(result.usage, output.usage) };
     }
-    getParallelToolCalls(input) {
+    getParallelToolCalls(input, modelOptions) {
         if (!this.supportsParallelToolCalls)
             return undefined;
         if (!input.tools?.length)
             return undefined;
-        return
+        return modelOptions.parallelToolCalls;
     }
     async getRunMessages(input) {
         const messages = await contentsFromInputMessages(input.messages);
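The compiled hunks above also carry the GPT-5/o1 parameter fix noted in the beta.3 changelog entry: when the resolved model name looks like a gpt-5 or o1-family model, temperature and top_p are deleted from the request body, since those endpoints reject them. The standalone sketch below restates that rule outside the class; buildBody and SamplingOptions are illustrative names, not part of the package API.

```ts
// Illustrative restatement of the parameter-handling rule shown in the diff:
// GPT-5 and o1-family models reject sampling parameters, so temperature and
// top_p are stripped from the request body before it is sent.
interface SamplingOptions {
  temperature?: number;
  topP?: number;
  frequencyPenalty?: number;
  presencePenalty?: number;
}

function buildBody(model: string, opts: SamplingOptions) {
  const body: Record<string, unknown> = {
    model,
    temperature: opts.temperature,
    top_p: opts.topP,
    frequency_penalty: opts.frequencyPenalty,
    presence_penalty: opts.presencePenalty,
  };
  if (model.includes("gpt-5") || model.includes("o1-")) {
    delete body.temperature;
    delete body.top_p;
  }
  return body;
}
```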
@@ -1,4 +1,4 @@
-import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type
+import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput } from "@aigne/core";
 import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
 import type { ClientOptions, OpenAI } from "openai";
 import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
@@ -127,13 +127,12 @@ export declare class OpenAIChatModel extends ChatModel {
         apiKey: string | undefined;
         model: string;
     };
-    get modelOptions(): Omit<ChatModelInputOptions, "model"> | undefined;
     /**
      * Process the input and generate a response
      * @param input The input to process
      * @returns The generated response
      */
-    process(input: ChatModelInput,
+    process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
     private getReasoningEffort;
     private _process;
     private getParallelToolCalls;
@@ -1,4 +1,4 @@
-import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type
+import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput } from "@aigne/core";
 import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
 import type { ClientOptions, OpenAI } from "openai";
 import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
@@ -127,13 +127,12 @@ export declare class OpenAIChatModel extends ChatModel {
         apiKey: string | undefined;
         model: string;
     };
-    get modelOptions(): Omit<ChatModelInputOptions, "model"> | undefined;
     /**
      * Process the input and generate a response
      * @param input The input to process
      * @returns The generated response
      */
-    process(input: ChatModelInput,
+    process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
     private getReasoningEffort;
     private _process;
     private getParallelToolCalls;
@@ -89,16 +89,13 @@ export class OpenAIChatModel extends ChatModel {
             model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,
         };
     }
-    get modelOptions() {
-        return this.options?.modelOptions;
-    }
     /**
      * Process the input and generate a response
      * @param input The input to process
      * @returns The generated response
      */
-    process(input,
-        return this._process(input);
+    process(input, options) {
+        return this._process(input, options);
     }
     getReasoningEffort(effort) {
         if (typeof effort === "number") {
@@ -114,24 +111,25 @@ export class OpenAIChatModel extends ChatModel {
         }
         return effort;
     }
-    async _process(input) {
+    async _process(input, options) {
+        const modelOptions = await this.getModelOptions(input, options);
         const messages = await this.getRunMessages(input);
-        const model =
+        const model = modelOptions?.model || this.credential.model;
         const body = {
             model,
-            temperature: this.supportsTemperature
-
-
-
-            frequency_penalty: input.modelOptions?.frequencyPenalty ?? this.modelOptions?.frequencyPenalty,
-            presence_penalty: input.modelOptions?.presencePenalty ?? this.modelOptions?.presencePenalty,
+            temperature: this.supportsTemperature ? modelOptions.temperature : undefined,
+            top_p: modelOptions.topP,
+            frequency_penalty: modelOptions.frequencyPenalty,
+            presence_penalty: modelOptions.presencePenalty,
             messages,
-            stream_options: {
-                include_usage: true,
-            },
+            stream_options: { include_usage: true },
             stream: true,
-            reasoning_effort: this.getReasoningEffort(
+            reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),
         };
+        if (model.includes("gpt-5") || model.includes("o1-")) {
+            delete body.temperature;
+            delete body.top_p;
+        }
         // For models that do not support tools use with JSON schema in same request,
         // so we need to handle the case where tools are not used and responseFormat is json
         if (!input.tools?.length && input.responseFormat?.type === "json_schema") {
@@ -144,7 +142,7 @@ export class OpenAIChatModel extends ChatModel {
                 addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,
             }),
             tool_choice: input.toolChoice,
-            parallel_tool_calls: this.getParallelToolCalls(input),
+            parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),
             response_format: responseFormat,
         }));
         if (input.responseFormat?.type !== "json_schema") {
@@ -169,12 +167,12 @@ export class OpenAIChatModel extends ChatModel {
         const output = await this.requestStructuredOutput(body, input.responseFormat);
         return { ...output, usage: mergeUsage(result.usage, output.usage) };
     }
-    getParallelToolCalls(input) {
+    getParallelToolCalls(input, modelOptions) {
         if (!this.supportsParallelToolCalls)
             return undefined;
         if (!input.tools?.length)
             return undefined;
-        return
+        return modelOptions.parallelToolCalls;
     }
     async getRunMessages(input) {
         const messages = await contentsFromInputMessages(input.messages);
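For reference, here is a usage sketch of how these options reach the model from application code. The option names mirror the fields the hunks read (options.model, options.modelOptions.temperature, modelOptions.parallelToolCalls, and so on), but the exact constructor and invoke() shapes shown here are assumptions; treat the package README as authoritative.

```ts
import { OpenAIChatModel } from "@aigne/openai";

// Usage sketch only: constructor and invoke() shapes are assumed, not taken
// from this diff.
const model = new OpenAIChatModel({
  apiKey: process.env.OPENAI_API_KEY,
  model: "gpt-4o-mini",
  modelOptions: {
    temperature: 0.2,
    parallelToolCalls: false,
  },
});

async function main() {
  // With a GPT-5 or o1-family model the same call still works, but
  // temperature/top_p are stripped from the request body as shown above.
  const result = await model.invoke({
    messages: [{ role: "user", content: "Say hello in one word." }],
  });
  console.log(result.text);
}

main().catch(console.error);
```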
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aigne/openai",
-  "version": "0.16.5-beta.
+  "version": "0.16.5-beta.4",
   "description": "AIGNE OpenAI SDK for integrating with OpenAI's GPT models and API services",
   "publishConfig": {
     "access": "public"
@@ -38,7 +38,7 @@
     "@aigne/uuid": "^13.0.1",
     "openai": "^6.5.0",
     "zod": "^3.25.67",
-    "@aigne/core": "^1.
+    "@aigne/core": "^1.66.0-beta.3",
     "@aigne/platform-helpers": "^0.6.3"
   },
   "devDependencies": {
@@ -47,7 +47,7 @@
     "npm-run-all": "^4.1.5",
     "rimraf": "^6.0.1",
     "typescript": "^5.9.2",
-    "@aigne/test-utils": "^0.5.58-beta.
+    "@aigne/test-utils": "^0.5.58-beta.4"
   },
   "scripts": {
     "lint": "tsc --noEmit",