@aigne/openai 0.16.5-beta.3 → 0.16.5-beta.4

This diff reflects the changes between publicly released versions of this package as they appear in their respective public registries, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # Changelog
 
+ ## [0.16.5-beta.4](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.5-beta.3...openai-v0.16.5-beta.4) (2025-11-06)
+
+
+ ### Features
+
+ * add dynamic model options resolution with getter pattern ([#708](https://github.com/AIGNE-io/aigne-framework/issues/708)) ([5ed5085](https://github.com/AIGNE-io/aigne-framework/commit/5ed5085203763c70194853c56edc13acf56d81c6))
+
+
+ ### Dependencies
+
+ * The following workspace dependencies were updated
+   * dependencies
+     * @aigne/core bumped to 1.66.0-beta.3
+   * devDependencies
+     * @aigne/test-utils bumped to 0.5.58-beta.4
+
  ## [0.16.5-beta.3](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.5-beta.2...openai-v0.16.5-beta.3) (2025-11-05)
 
 
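The headline change is [#708](https://github.com/AIGNE-io/aigne-framework/issues/708): per-call `input.modelOptions` and the instance-level defaults are no longer merged field by field with `??` chains at each use site; instead `_process` resolves them once up front via `await this.getModelOptions(input, options)`, as the implementation hunks below show. A minimal sketch of the before/after shape, assuming a plain spread merge; the names here are illustrative, not @aigne/openai's actual API:

```ts
// Illustrative sketch only: ModelOptions, mergeFieldByField, and
// resolveModelOptions are hypothetical names, not @aigne/openai's API.
interface ModelOptions {
  model?: string;
  temperature?: number;
  topP?: number;
  parallelToolCalls?: boolean;
}

// Before beta.4: every field merged ad hoc, one `??` chain per use site.
function mergeFieldByField(
  input: { modelOptions?: ModelOptions },
  defaults?: ModelOptions,
) {
  return {
    temperature: input.modelOptions?.temperature ?? defaults?.temperature,
    topP: input.modelOptions?.topP ?? defaults?.topP,
    // ...repeated for frequencyPenalty, presencePenalty, reasoningEffort, ...
  };
}

// After beta.4: one resolution step up front. Per-call options still win,
// and the defaults may come from a lazy (getter-like) or async source,
// which is presumably what makes the resolution "dynamic".
async function resolveModelOptions(
  input: { modelOptions?: ModelOptions },
  defaults?: ModelOptions | (() => Promise<ModelOptions>),
): Promise<ModelOptions> {
  const base = typeof defaults === "function" ? await defaults() : defaults;
  return { ...base, ...input.modelOptions };
}
```

Resolving once also lets the defaults be computed lazily without every call site knowing about it, which appears to be what the "getter pattern" in the PR title refers to.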
@@ -1,4 +1,4 @@
- import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputOptions, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput } from "@aigne/core";
+ import { type AgentInvokeOptions, type AgentProcessResult, ChatModel, type ChatModelInput, type ChatModelInputMessage, type ChatModelInputTool, type ChatModelOptions, type ChatModelOutput } from "@aigne/core";
  import { type PromiseOrValue } from "@aigne/core/utils/type-utils.js";
  import type { ClientOptions, OpenAI } from "openai";
  import type { ChatCompletionMessageParam, ChatCompletionTool } from "openai/resources";
@@ -127,13 +127,12 @@ export declare class OpenAIChatModel extends ChatModel {
  apiKey: string | undefined;
  model: string;
  };
- get modelOptions(): Omit<ChatModelInputOptions, "model"> | undefined;
  /**
  * Process the input and generate a response
  * @param input The input to process
  * @returns The generated response
  */
- process(input: ChatModelInput, _options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
+ process(input: ChatModelInput, options: AgentInvokeOptions): PromiseOrValue<AgentProcessResult<ChatModelOutput>>;
  private getReasoningEffort;
  private _process;
  private getParallelToolCalls;
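Two declaration changes stand out above: the public `modelOptions` getter is removed from the class surface, and the second parameter of `process` is renamed from `_options` to `options`. The rename is meaningful under the usual TypeScript convention, sketched here with hypothetical functions:

```ts
// Hypothetical functions illustrating the underscore convention:
// a leading underscore marks a parameter as intentionally unused
// (and keeps noUnusedParameters quiet).
function trimBefore(input: string, _options?: { signal?: AbortSignal }): string {
  return input.trim(); // _options deliberately ignored
}

// Dropping the underscore signals the argument is now consumed, which is
// exactly what process() does here by forwarding options to _process().
function trimAfter(input: string, options?: { signal?: AbortSignal }): string {
  if (options?.signal?.aborted) throw new Error("aborted");
  return input.trim();
}
```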
@@ -94,16 +94,13 @@ class OpenAIChatModel extends core_1.ChatModel {
  model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,
  };
  }
- get modelOptions() {
- return this.options?.modelOptions;
- }
  /**
  * Process the input and generate a response
  * @param input The input to process
  * @returns The generated response
  */
- process(input, _options) {
- return this._process(input);
+ process(input, options) {
+ return this._process(input, options);
  }
  getReasoningEffort(effort) {
  if (typeof effort === "number") {
@@ -119,23 +116,20 @@ class OpenAIChatModel extends core_1.ChatModel {
  }
  return effort;
  }
- async _process(input) {
+ async _process(input, options) {
+ const modelOptions = await this.getModelOptions(input, options);
  const messages = await this.getRunMessages(input);
- const model = input.modelOptions?.model || this.credential.model;
+ const model = modelOptions?.model || this.credential.model;
  const body = {
  model,
- temperature: this.supportsTemperature
- ? (input.modelOptions?.temperature ?? this.modelOptions?.temperature)
- : undefined,
- top_p: input.modelOptions?.topP ?? this.modelOptions?.topP,
- frequency_penalty: input.modelOptions?.frequencyPenalty ?? this.modelOptions?.frequencyPenalty,
- presence_penalty: input.modelOptions?.presencePenalty ?? this.modelOptions?.presencePenalty,
+ temperature: this.supportsTemperature ? modelOptions.temperature : undefined,
+ top_p: modelOptions.topP,
+ frequency_penalty: modelOptions.frequencyPenalty,
+ presence_penalty: modelOptions.presencePenalty,
  messages,
- stream_options: {
- include_usage: true,
- },
+ stream_options: { include_usage: true },
  stream: true,
- reasoning_effort: this.getReasoningEffort(input.modelOptions?.reasoningEffort ?? this.modelOptions?.reasoningEffort),
+ reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),
  };
  if (model.includes("gpt-5") || model.includes("o1-")) {
  delete body.temperature;
@@ -153,7 +147,7 @@ class OpenAIChatModel extends core_1.ChatModel {
  addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,
  }),
  tool_choice: input.toolChoice,
- parallel_tool_calls: this.getParallelToolCalls(input),
+ parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),
  response_format: responseFormat,
  }));
  if (input.responseFormat?.type !== "json_schema") {
@@ -178,12 +172,12 @@ class OpenAIChatModel extends core_1.ChatModel {
  const output = await this.requestStructuredOutput(body, input.responseFormat);
  return { ...output, usage: (0, model_utils_js_1.mergeUsage)(result.usage, output.usage) };
  }
- getParallelToolCalls(input) {
+ getParallelToolCalls(input, modelOptions) {
  if (!this.supportsParallelToolCalls)
  return undefined;
  if (!input.tools?.length)
  return undefined;
- return input.modelOptions?.parallelToolCalls ?? this.modelOptions?.parallelToolCalls;
+ return modelOptions.parallelToolCalls;
  }
  async getRunMessages(input) {
  const messages = await contentsFromInputMessages(input.messages);
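Behaviorally, per-call options should still override the instance defaults; only the merge site moved. A hedged usage sketch: the constructor fields follow the names visible in this diff (`model`, `modelOptions`, `apiKey`), and the `invoke()` entry point is assumed from `@aigne/core`'s Agent API rather than confirmed here:

```ts
import { OpenAIChatModel } from "@aigne/openai";

async function main() {
  const model = new OpenAIChatModel({
    apiKey: process.env.OPENAI_API_KEY,
    model: "gpt-4o-mini",
    // Instance-level defaults, now merged once by getModelOptions
    // at request time.
    modelOptions: { temperature: 0.2 },
  });

  // Per-call modelOptions should still take precedence over the
  // instance defaults; only the merge site moved in this release.
  const output = await model.invoke({
    messages: [{ role: "user", content: "Say hello in one word." }],
    modelOptions: { temperature: 0.9 },
  });

  console.log(output);
}

main().catch(console.error);
```

The hunks that follow apply the same changes to the ESM build.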
@@ -89,16 +89,13 @@ export class OpenAIChatModel extends ChatModel {
  model: this.options?.model || CHAT_MODEL_OPENAI_DEFAULT_MODEL,
  };
  }
- get modelOptions() {
- return this.options?.modelOptions;
- }
  /**
  * Process the input and generate a response
  * @param input The input to process
  * @returns The generated response
  */
- process(input, _options) {
- return this._process(input);
+ process(input, options) {
+ return this._process(input, options);
  }
  getReasoningEffort(effort) {
  if (typeof effort === "number") {
@@ -114,23 +111,20 @@ export class OpenAIChatModel extends ChatModel {
  }
  return effort;
  }
- async _process(input) {
+ async _process(input, options) {
+ const modelOptions = await this.getModelOptions(input, options);
  const messages = await this.getRunMessages(input);
- const model = input.modelOptions?.model || this.credential.model;
+ const model = modelOptions?.model || this.credential.model;
  const body = {
  model,
- temperature: this.supportsTemperature
- ? (input.modelOptions?.temperature ?? this.modelOptions?.temperature)
- : undefined,
- top_p: input.modelOptions?.topP ?? this.modelOptions?.topP,
- frequency_penalty: input.modelOptions?.frequencyPenalty ?? this.modelOptions?.frequencyPenalty,
- presence_penalty: input.modelOptions?.presencePenalty ?? this.modelOptions?.presencePenalty,
+ temperature: this.supportsTemperature ? modelOptions.temperature : undefined,
+ top_p: modelOptions.topP,
+ frequency_penalty: modelOptions.frequencyPenalty,
+ presence_penalty: modelOptions.presencePenalty,
  messages,
- stream_options: {
- include_usage: true,
- },
+ stream_options: { include_usage: true },
  stream: true,
- reasoning_effort: this.getReasoningEffort(input.modelOptions?.reasoningEffort ?? this.modelOptions?.reasoningEffort),
+ reasoning_effort: this.getReasoningEffort(modelOptions.reasoningEffort),
  };
  if (model.includes("gpt-5") || model.includes("o1-")) {
  delete body.temperature;
@@ -148,7 +142,7 @@ export class OpenAIChatModel extends ChatModel {
  addTypeToEmptyParameters: !this.supportsToolsEmptyParameters,
  }),
  tool_choice: input.toolChoice,
- parallel_tool_calls: this.getParallelToolCalls(input),
+ parallel_tool_calls: this.getParallelToolCalls(input, modelOptions),
  response_format: responseFormat,
  }));
  if (input.responseFormat?.type !== "json_schema") {
@@ -173,12 +167,12 @@ export class OpenAIChatModel extends ChatModel {
  const output = await this.requestStructuredOutput(body, input.responseFormat);
  return { ...output, usage: mergeUsage(result.usage, output.usage) };
  }
- getParallelToolCalls(input) {
+ getParallelToolCalls(input, modelOptions) {
  if (!this.supportsParallelToolCalls)
  return undefined;
  if (!input.tools?.length)
  return undefined;
- return input.modelOptions?.parallelToolCalls ?? this.modelOptions?.parallelToolCalls;
+ return modelOptions.parallelToolCalls;
  }
  async getRunMessages(input) {
  const messages = await contentsFromInputMessages(input.messages);
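The ESM build mirrors the CJS changes one for one (only the `mergeUsage` import form differs). One detail worth restating from `getParallelToolCalls`: the result is deliberately tri-state, so when the flag does not apply the field is omitted from the request body rather than sent as an explicit `false`. A self-contained restatement of that gate:

```ts
// Self-contained restatement of the gate in getParallelToolCalls above.
// The tri-state result matters: returning undefined omits the field from
// the request body entirely, instead of sending an explicit false.
function parallelToolCallsFor(
  supportsParallelToolCalls: boolean,
  tools: readonly unknown[] | undefined,
  resolved: { parallelToolCalls?: boolean },
): boolean | undefined {
  if (!supportsParallelToolCalls) return undefined; // provider can't do it
  if (!tools?.length) return undefined; // nothing to parallelize
  return resolved.parallelToolCalls; // may itself be undefined
}

// e.g. no tools in the request => the flag is suppressed:
// parallelToolCallsFor(true, [], { parallelToolCalls: true }) === undefined
```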
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@aigne/openai",
- "version": "0.16.5-beta.3",
+ "version": "0.16.5-beta.4",
  "description": "AIGNE OpenAI SDK for integrating with OpenAI's GPT models and API services",
  "publishConfig": {
  "access": "public"
@@ -38,7 +38,7 @@
  "@aigne/uuid": "^13.0.1",
  "openai": "^6.5.0",
  "zod": "^3.25.67",
- "@aigne/core": "^1.65.1-beta.3",
+ "@aigne/core": "^1.66.0-beta.3",
  "@aigne/platform-helpers": "^0.6.3"
  },
  "devDependencies": {
@@ -47,7 +47,7 @@
  "npm-run-all": "^4.1.5",
  "rimraf": "^6.0.1",
  "typescript": "^5.9.2",
- "@aigne/test-utils": "^0.5.58-beta.3"
+ "@aigne/test-utils": "^0.5.58-beta.4"
  },
  "scripts": {
  "lint": "tsc --noEmit",