@ai-sdk/openai 2.0.42 → 2.0.44
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +2 -2
- package/dist/index.d.ts +2 -2
- package/dist/index.js +13 -6
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +13 -6
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +2 -2
- package/dist/internal/index.d.ts +2 -2
- package/dist/internal/index.js +12 -5
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +12 -5
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
```diff
@@ -129,7 +129,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }
 
-type OpenAIImageModelId = '
+type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
 declare const hasDefaultResponseFormat: Set<string>;
 
@@ -203,7 +203,7 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
     doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
 }
 
-type OpenAIResponsesModelId = '
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 
 declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
```
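The `'gpt-image-1-mini'` entry in `OpenAIImageModelId` is new in this release. Below is a minimal sketch of selecting it through the AI SDK's image generation helper, assuming the standard `experimental_generateImage` API from the `ai` package; the prompt and the file output are illustrative and not part of this diff:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateImage as generateImage } from 'ai';
import { writeFile } from 'node:fs/promises';

// 'gpt-image-1-mini' is now part of the typed model-ID union, so it
// autocompletes; per the dist changes it allows up to 10 images per call
// and is treated as a model with a default response format.
const { image } = await generateImage({
  model: openai.image('gpt-image-1-mini'),
  prompt: 'A watercolor lighthouse at dawn', // illustrative prompt
});

// The generated image is exposed as base64 and as a Uint8Array.
await writeFile('lighthouse.png', image.uint8Array);
```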
package/dist/internal/index.d.ts
CHANGED

```diff
@@ -129,7 +129,7 @@ declare class OpenAIEmbeddingModel implements EmbeddingModelV2<string> {
     doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV2<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV2<string>['doEmbed']>>>;
 }
 
-type OpenAIImageModelId = '
+type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | 'gpt-image-1' | 'gpt-image-1-mini' | (string & {});
 declare const modelMaxImagesPerCall: Record<OpenAIImageModelId, number>;
 declare const hasDefaultResponseFormat: Set<string>;
 
@@ -203,7 +203,7 @@ declare class OpenAISpeechModel implements SpeechModelV2 {
     doGenerate(options: Parameters<SpeechModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<SpeechModelV2['doGenerate']>>>;
 }
 
-type OpenAIResponsesModelId = '
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 
 declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
```
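The widened `OpenAIResponsesModelId` union adds IDs such as `'gpt-5-pro'`, `'gpt-5-codex'`, and dated `gpt-5` snapshots while keeping the `(string & {})` escape hatch for arbitrary IDs. A rough sketch of using one of the newly listed IDs with the Responses API model factory, assuming a standard `generateText` call; the prompt is illustrative:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Newly listed IDs such as 'gpt-5-codex' now get autocompletion; unknown
// IDs still type-check because the union ends with (string & {}).
const { text } = await generateText({
  model: openai.responses('gpt-5-codex'),
  prompt: 'Summarize the difference between a Set and a Map in JavaScript.',
});

console.log(text);
```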
package/dist/internal/index.js
CHANGED

```diff
@@ -1608,9 +1608,13 @@ var import_v48 = require("zod/v4");
 var modelMaxImagesPerCall = {
   "dall-e-3": 1,
   "dall-e-2": 10,
-  "gpt-image-1": 10
+  "gpt-image-1": 10,
+  "gpt-image-1-mini": 10
 };
-var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
+var hasDefaultResponseFormat = /* @__PURE__ */ new Set([
+  "gpt-image-1",
+  "gpt-image-1-mini"
+]);
 
 // src/image/openai-image-model.ts
 var OpenAIImageModel = class {
@@ -3348,7 +3352,8 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "tool-input-start",
             id: value.item.id,
-            toolName: webSearchToolName != null ? webSearchToolName : "web_search"
+            toolName: webSearchToolName != null ? webSearchToolName : "web_search",
+            providerExecuted: true
           });
         } else if (value.item.type === "computer_call") {
           ongoingToolCalls[value.output_index] = {
@@ -3358,7 +3363,8 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "tool-input-start",
             id: value.item.id,
-            toolName: "computer_use"
+            toolName: "computer_use",
+            providerExecuted: true
           });
         } else if (value.item.type === "code_interpreter_call") {
           ongoingToolCalls[value.output_index] = {
@@ -3371,7 +3377,8 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "tool-input-start",
             id: value.item.id,
-            toolName: "code_interpreter"
+            toolName: "code_interpreter",
+            providerExecuted: true
           });
           controller.enqueue({
             type: "tool-input-delta",
```
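The runtime change in these hunks is that `tool-input-start` stream parts emitted for provider-executed tools (web search, computer use, code interpreter) now carry `providerExecuted: true`. A rough sketch of observing the flag, assuming the AI SDK 5 `fullStream` part shape and the `webSearchPreview` provider tool; the model, tool configuration, and prompt are illustrative:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

const result = streamText({
  model: openai.responses('gpt-4o'),
  tools: {
    web_search_preview: openai.tools.webSearchPreview({}),
  },
  prompt: 'What changed in the latest TypeScript release?',
});

// tool-input-start parts for provider-executed tools are now flagged, so a
// consumer can distinguish them from client-executed tool calls without
// matching on tool names.
for await (const part of result.fullStream) {
  if (part.type === 'tool-input-start' && part.providerExecuted) {
    console.log(`provider-executed tool started: ${part.toolName}`);
  }
}
```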