@ai-sdk/openai 2.0.9 → 2.0.10
This diff shows the changes between the publicly released versions of the package as they appear in the public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +7 -0
- package/dist/index.d.mts +36 -53
- package/dist/index.d.ts +36 -53
- package/dist/index.js +502 -442
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +513 -453
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +316 -255
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +301 -240
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
@@ -12,6 +12,11 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
 type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
 declare const openaiTools: {
+    codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+        container?: string | {
+            fileIds?: string[];
+        };
+    }>;
     fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
         query: string;
     }, {
@@ -41,68 +46,19 @@ declare const openaiTools: {
     }>;
 };
 
-type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
-type OpenAIConfig = {
-    provider: string;
-    url: (options: {
-        modelId: string;
-        path: string;
-    }) => string;
-    headers: () => Record<string, string | undefined>;
-    fetch?: FetchFunction;
-    generateId?: () => string;
-};
-
 declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
 type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
 
-declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
-    readonly specificationVersion = "v2";
-    readonly modelId: OpenAIResponsesModelId;
-    private readonly config;
-    constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-    readonly supportedUrls: Record<string, RegExp[]>;
-    get provider(): string;
-    private getArgs;
-    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
-}
-declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        auto: "auto";
-        flex: "flex";
-        priority: "priority";
-    }>>>;
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-        "reasoning.encrypted_content": "reasoning.encrypted_content";
-        "file_search_call.results": "file_search_call.results";
-    }>>>>;
-    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>>;
-}, z.core.$strip>;
-type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
+type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
 interface OpenAIProvider extends ProviderV2 {
     (modelId: OpenAIResponsesModelId): LanguageModelV2;
     /**
     Creates an OpenAI model for text generation.
     */
-    languageModel(modelId: OpenAIResponsesModelId):
+    languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
     /**
     Creates an OpenAI chat model for text generation.
     */
@@ -188,4 +144,31 @@ Default OpenAI provider instance.
 */
 declare const openai: OpenAIProvider;
 
+declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+        priority: "priority";
+    }>>>;
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+        "reasoning.encrypted_content": "reasoning.encrypted_content";
+        "file_search_call.results": "file_search_call.results";
+    }>>>>;
+    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>>;
+}, z.core.$strip>;
+type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
+
 export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
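The notable addition in this file is the `codeInterpreter` entry in `openaiTools`: a provider-defined tool factory whose only option is an optional `container`, given either as a container id string or as an object with optional `fileIds`. Below is a minimal sketch of how it could plug into `generateText`, assuming the factory is surfaced as `openai.tools.codeInterpreter` and registered like the SDK's other provider-defined tools; the tool key `code_interpreter`, the model id, and the prompt are illustrative, not confirmed by this diff.

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch only: assumes `openaiTools` is exposed as `openai.tools` and that the
// Responses API models listed above accept the code interpreter tool.
const result = await generateText({
  model: openai('gpt-5'),
  tools: {
    // The factory's options mirror the declared type: `container` may be a
    // container id string or `{ fileIds?: string[] }`; both are optional,
    // so an empty options object is valid.
    code_interpreter: openai.tools.codeInterpreter({}),
  },
  prompt: 'Use the code interpreter to compute the first 20 Fibonacci numbers.',
});

console.log(result.text);
```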
package/dist/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import {
+import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';
@@ -12,6 +12,11 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
 type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
 declare const openaiTools: {
+    codeInterpreter: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{}, {
+        container?: string | {
+            fileIds?: string[];
+        };
+    }>;
     fileSearch: _ai_sdk_provider_utils.ProviderDefinedToolFactory<{
         query: string;
     }, {
@@ -41,68 +46,19 @@ declare const openaiTools: {
     }>;
 };
 
-type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
-
-type OpenAIConfig = {
-    provider: string;
-    url: (options: {
-        modelId: string;
-        path: string;
-    }) => string;
-    headers: () => Record<string, string | undefined>;
-    fetch?: FetchFunction;
-    generateId?: () => string;
-};
-
 declare const openaiResponsesModelIds: readonly ["gpt-4.1", "gpt-4.1-2025-04-14", "gpt-4.1-mini", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano", "gpt-4.1-nano-2025-04-14", "gpt-4o", "gpt-4o-2024-05-13", "gpt-4o-2024-08-06", "gpt-4o-2024-11-20", "gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01", "gpt-4o-audio-preview-2024-12-17", "gpt-4o-search-preview", "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview", "gpt-4o-mini-search-preview-2025-03-11", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", "gpt-4-turbo-2024-04-09", "gpt-4-turbo-preview", "gpt-4-0125-preview", "gpt-4-1106-preview", "gpt-4", "gpt-4-0613", "gpt-4.5-preview", "gpt-4.5-preview-2025-02-27", "gpt-3.5-turbo-0125", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "chatgpt-4o-latest", "o1", "o1-2024-12-17", "o3-mini", "o3-mini-2025-01-31", "o3", "o3-2025-04-16", "o4-mini", "o4-mini-2025-04-16", "codex-mini-latest", "computer-use-preview", "gpt-5", "gpt-5-2025-08-07", "gpt-5-mini", "gpt-5-mini-2025-08-07", "gpt-5-nano", "gpt-5-nano-2025-08-07", "gpt-5-chat-latest"];
 type OpenAIResponsesModelId = (typeof openaiResponsesModelIds)[number] | (string & {});
 
-declare class OpenAIResponsesLanguageModel implements LanguageModelV2 {
-    readonly specificationVersion = "v2";
-    readonly modelId: OpenAIResponsesModelId;
-    private readonly config;
-    constructor(modelId: OpenAIResponsesModelId, config: OpenAIConfig);
-    readonly supportedUrls: Record<string, RegExp[]>;
-    get provider(): string;
-    private getArgs;
-    doGenerate(options: Parameters<LanguageModelV2['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV2['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
-}
-declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
-    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
-    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        auto: "auto";
-        flex: "flex";
-        priority: "priority";
-    }>>>;
-    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
-        "reasoning.encrypted_content": "reasoning.encrypted_content";
-        "file_search_call.results": "file_search_call.results";
-    }>>>>;
-    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
-        low: "low";
-        medium: "medium";
-        high: "high";
-    }>>>;
-}, z.core.$strip>;
-type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
-
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
+type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
+
 interface OpenAIProvider extends ProviderV2 {
     (modelId: OpenAIResponsesModelId): LanguageModelV2;
     /**
     Creates an OpenAI model for text generation.
     */
-    languageModel(modelId: OpenAIResponsesModelId):
+    languageModel(modelId: OpenAIResponsesModelId): LanguageModelV2;
     /**
     Creates an OpenAI chat model for text generation.
     */
@@ -188,4 +144,31 @@ Default OpenAI provider instance.
 */
 declare const openai: OpenAIProvider;
 
+declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+    metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
+    parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+        priority: "priority";
+    }>>>;
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+        "reasoning.encrypted_content": "reasoning.encrypted_content";
+        "file_search_call.results": "file_search_call.results";
+    }>>>>;
+    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>>;
+}, z.core.$strip>;
+type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
+
 export { type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, createOpenAI, openai };
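As in index.d.mts, the `openaiResponsesProviderOptionsSchema` declaration and its inferred `OpenAIResponsesProviderOptions` type now appear after the provider instance but remain exported, so the option names it declares (`reasoningEffort`, `textVerbosity`, `serviceTier`, `include`, and so on) can still be typed at call sites. The sketch below passes them through `providerOptions.openai`, assuming the AI SDK's usual `providerOptions` plumbing; the specific values and model id are illustrative, and only the field names come from the diff above.

```ts
import { generateText } from 'ai';
import { openai, type OpenAIResponsesProviderOptions } from '@ai-sdk/openai';

// Field names are taken from the exported schema; their runtime effect is
// assumed from the AI SDK's providerOptions convention, not verified here.
const { text } = await generateText({
  model: openai('o4-mini'),
  providerOptions: {
    openai: {
      reasoningEffort: 'low',
      textVerbosity: 'low',
      serviceTier: 'auto',
      include: ['reasoning.encrypted_content'],
    } satisfies OpenAIResponsesProviderOptions,
  },
  prompt: 'Explain the difference between the flex and priority service tiers.',
});

console.log(text);
```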