@ai-sdk/openai 2.1.0-beta.1 → 2.1.0-beta.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -0
- package/dist/index.d.mts +38 -5
- package/dist/index.d.ts +38 -5
- package/dist/index.js +22 -14
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +23 -15
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +7 -7
- package/dist/internal/index.d.ts +7 -7
- package/dist/internal/index.js +8 -8
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +6 -6
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.d.mts
CHANGED

@@ -1,9 +1,9 @@
-import { LanguageModelV2,
+import { LanguageModelV2, EmbeddingModelV3, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';

 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const
+declare const openaiChatLanguageModelOptions: z.ZodObject<{
 logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
 logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
@@ -33,7 +33,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 promptCacheKey: z.ZodOptional<z.ZodString>;
 safetyIdentifier: z.ZodOptional<z.ZodString>;
 }, z.core.$strip>;
-type
+type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;

 type OpenAIChatConfig = {
 provider: string;
@@ -117,15 +117,15 @@ declare const openaiEmbeddingProviderOptions: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;

-declare class OpenAIEmbeddingModel implements
-readonly specificationVersion = "
+declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
+readonly specificationVersion = "v3";
 readonly modelId: OpenAIEmbeddingModelId;
 readonly maxEmbeddingsPerCall = 2048;
 readonly supportsParallelCalls = true;
 private readonly config;
 get provider(): string;
 constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
-doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<
+doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3<string>['doEmbed']>>>;
 }

 type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
@@ -247,4 +247,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId,
+export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions };
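The declaration changes above remove the old openaiProviderOptions export and add a chat-specific openaiChatLanguageModelOptions schema together with an inferred OpenAIChatLanguageModelOptions type. Below is a minimal sketch of how the renamed type might be consumed; the '@ai-sdk/openai/internal' import path, the generateText call shape, and the concrete option values are assumptions and are not part of this diff, while the field names come from the schema shown above.

// A minimal sketch, not part of this diff.
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';
import type { OpenAIChatLanguageModelOptions } from '@ai-sdk/openai/internal'; // assumed export path

// Field names (parallelToolCalls, promptCacheKey, safetyIdentifier) appear in the
// openaiChatLanguageModelOptions schema above; the values are hypothetical.
const chatOptions: OpenAIChatLanguageModelOptions = {
  parallelToolCalls: false,
  promptCacheKey: 'docs-demo-cache',
  safetyIdentifier: 'user-1234',
};

async function main() {
  const { text } = await generateText({
    model: openai('gpt-5-mini'), // 'gpt-5-mini' is one of the ids in OpenAIChatModelId
    prompt: 'Say hello.',
    providerOptions: { openai: chatOptions },
  });
  console.log(text);
}

main().catch(console.error);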
package/dist/internal/index.d.ts
CHANGED

@@ -1,9 +1,9 @@
-import { LanguageModelV2,
+import { LanguageModelV2, EmbeddingModelV3, ImageModelV2, TranscriptionModelV2CallOptions, TranscriptionModelV2, SpeechModelV2 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod/v4';

 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
-declare const
+declare const openaiChatLanguageModelOptions: z.ZodObject<{
 logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
 logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
 parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
@@ -33,7 +33,7 @@ declare const openaiProviderOptions: z.ZodObject<{
 promptCacheKey: z.ZodOptional<z.ZodString>;
 safetyIdentifier: z.ZodOptional<z.ZodString>;
 }, z.core.$strip>;
-type
+type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;

 type OpenAIChatConfig = {
 provider: string;
@@ -117,15 +117,15 @@ declare const openaiEmbeddingProviderOptions: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAIEmbeddingProviderOptions = z.infer<typeof openaiEmbeddingProviderOptions>;

-declare class OpenAIEmbeddingModel implements
-readonly specificationVersion = "
+declare class OpenAIEmbeddingModel implements EmbeddingModelV3<string> {
+readonly specificationVersion = "v3";
 readonly modelId: OpenAIEmbeddingModelId;
 readonly maxEmbeddingsPerCall = 2048;
 readonly supportsParallelCalls = true;
 private readonly config;
 get provider(): string;
 constructor(modelId: OpenAIEmbeddingModelId, config: OpenAIConfig);
-doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<
+doEmbed({ values, headers, abortSignal, providerOptions, }: Parameters<EmbeddingModelV3<string>['doEmbed']>[0]): Promise<Awaited<ReturnType<EmbeddingModelV3<string>['doEmbed']>>>;
 }

 type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
@@ -247,4 +247,4 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;

-export { OpenAIChatLanguageModel, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId,
+export { OpenAIChatLanguageModel, type OpenAIChatLanguageModelOptions, type OpenAIChatModelId, OpenAICompletionLanguageModel, type OpenAICompletionModelId, type OpenAICompletionProviderOptions, OpenAIEmbeddingModel, type OpenAIEmbeddingModelId, type OpenAIEmbeddingProviderOptions, OpenAIImageModel, type OpenAIImageModelId, OpenAIResponsesLanguageModel, type OpenAIResponsesProviderOptions, type OpenAISpeechCallOptions, OpenAISpeechModel, type OpenAISpeechModelId, type OpenAITranscriptionCallOptions, OpenAITranscriptionModel, type OpenAITranscriptionModelId, type OpenAITranscriptionProviderOptions, hasDefaultResponseFormat, modelMaxImagesPerCall, openAITranscriptionProviderOptions, openaiChatLanguageModelOptions, openaiCompletionProviderOptions, openaiEmbeddingProviderOptions };
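Both declaration files also move OpenAIEmbeddingModel to the v3 embedding specification (EmbeddingModelV3<string>, specificationVersion "v3"). The sketch below exercises the embedding model through the high-level embedMany helper; the textEmbeddingModel() factory and the 'text-embedding-3-small' model id are assumptions about the public API and do not appear in this diff, while maxEmbeddingsPerCall = 2048 and supportsParallelCalls = true come from the declaration above.

// A minimal sketch, not part of this diff.
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

async function main() {
  const { embeddings } = await embedMany({
    // openai.textEmbeddingModel() and the model id are assumed, not shown in this diff.
    model: openai.textEmbeddingModel('text-embedding-3-small'),
    values: ['first chunk of text', 'second chunk of text'],
  });
  console.log(embeddings.length); // 2
}

main().catch(console.error);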
package/dist/internal/index.js
CHANGED

@@ -30,9 +30,9 @@ __export(internal_exports, {
 hasDefaultResponseFormat: () => hasDefaultResponseFormat,
 modelMaxImagesPerCall: () => modelMaxImagesPerCall,
 openAITranscriptionProviderOptions: () => openAITranscriptionProviderOptions,
+openaiChatLanguageModelOptions: () => openaiChatLanguageModelOptions,
 openaiCompletionProviderOptions: () => openaiCompletionProviderOptions,
-openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
-openaiProviderOptions: () => openaiProviderOptions
+openaiEmbeddingProviderOptions: () => openaiEmbeddingProviderOptions
 });
 module.exports = __toCommonJS(internal_exports);

@@ -270,7 +270,7 @@ function mapOpenAIFinishReason(finishReason) {

 // src/chat/openai-chat-options.ts
 var import_v42 = require("zod/v4");
-var
+var openaiChatLanguageModelOptions = import_v42.z.object({
 /**
 * Modify the likelihood of specified tokens appearing in the completion.
 *
@@ -452,7 +452,7 @@ var OpenAIChatLanguageModel = class {
 const openaiOptions = (_a = await (0, import_provider_utils3.parseProviderOptions)({
 provider: "openai",
 providerOptions,
-schema:
+schema: openaiChatLanguageModelOptions
 })) != null ? _a : {};
 const structuredOutputs = (_b = openaiOptions.structuredOutputs) != null ? _b : true;
 if (topK != null) {
@@ -1524,7 +1524,7 @@ var openaiEmbeddingProviderOptions = import_v46.z.object({
 // src/embedding/openai-embedding-model.ts
 var OpenAIEmbeddingModel = class {
 constructor(modelId, config) {
-this.specificationVersion = "
+this.specificationVersion = "v3";
 this.maxEmbeddingsPerCall = 2048;
 this.supportsParallelCalls = true;
 this.modelId = modelId;
@@ -2926,7 +2926,7 @@ var OpenAIResponsesLanguageModel = class {
 ])
 ),
 service_tier: import_v418.z.string().nullish(),
-incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).
+incomplete_details: import_v418.z.object({ reason: import_v418.z.string() }).nullish(),
 usage: usageSchema2
 })
 ),
@@ -3816,8 +3816,8 @@ var openaiResponsesProviderOptionsSchema = import_v418.z.object({
 hasDefaultResponseFormat,
 modelMaxImagesPerCall,
 openAITranscriptionProviderOptions,
+openaiChatLanguageModelOptions,
 openaiCompletionProviderOptions,
-openaiEmbeddingProviderOptions
-openaiProviderOptions
+openaiEmbeddingProviderOptions
 });
 //# sourceMappingURL=index.js.map
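In the compiled internal bundle, the renamed schema is what parseProviderOptions validates chat provider options against (the hunk at line 452 above), and it replaces openaiProviderOptions in the internal export list. Below is a small sketch of validating raw options directly against the exported zod schema; the '@ai-sdk/openai/internal' import path and the sample input are assumptions.

// A minimal sketch, not part of this diff: roughly mirrors the internal
// parseProviderOptions({ schema: openaiChatLanguageModelOptions }) call shown above.
import { openaiChatLanguageModelOptions } from '@ai-sdk/openai/internal'; // assumed export path

const parsed = openaiChatLanguageModelOptions.safeParse({
  logprobs: 5,             // the schema accepts boolean | number here
  parallelToolCalls: true, // hypothetical input values
});

if (parsed.success) {
  console.log(parsed.data);
} else {
  console.error(parsed.error.issues);
}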