@ai-sdk/openai 2.0.0-canary.12 → 2.0.0-canary.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +14 -0
- package/dist/index.d.mts +7 -25
- package/dist/index.d.ts +7 -25
- package/dist/index.js +178 -164
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +178 -164
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +25 -43
- package/dist/internal/index.d.ts +25 -43
- package/dist/internal/index.js +174 -158
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +173 -158
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,19 @@
 # @ai-sdk/openai
 
+## 2.0.0-canary.13
+
+### Patch Changes
+
+- 177526b: chore(providers/openai-transcription): switch to providerOptions
+- c15dfbf: feat (providers/openai): add gpt-image-1 model id to image settings
+- 9bf7291: chore(providers/openai): enable structuredOutputs by default & switch to provider option
+- 4617fab: chore(embedding-models): remove remaining settings
+- Updated dependencies [9bf7291]
+- Updated dependencies [4617fab]
+- Updated dependencies [e030615]
+  - @ai-sdk/provider@2.0.0-canary.11
+  - @ai-sdk/provider-utils@3.0.0-canary.12
+
 ## 2.0.0-canary.12
 
 ### Patch Changes
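Taken together, the canary.13 changesets move per-model configuration off the model factories and into provider options. A minimal call-site sketch, assuming the AI SDK v5 canary `generateObject` API from the `ai` package and assuming `structuredOutputs` is the provider option key referred to by changeset 9bf7291:

```ts
import { generateObject } from 'ai';
import { openai } from '@ai-sdk/openai';
import { z } from 'zod';

// structuredOutputs is enabled by default in canary.13; opting out now happens
// per call via providerOptions instead of a settings object on the model.
const { object } = await generateObject({
  model: openai('gpt-4o'), // note: no second "settings" argument anymore
  schema: z.object({ city: z.string(), country: z.string() }),
  prompt: 'Where is the Eiffel Tower?',
  providerOptions: {
    openai: { structuredOutputs: false }, // assumed option key, per changeset 9bf7291
  },
});
```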
package/dist/index.d.mts
CHANGED
@@ -3,14 +3,6 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
-interface OpenAIChatSettings {
-    /**
-    Whether to use structured outputs. Defaults to false.
-
-    When enabled, tool calls and object generation will be strict and follow the provided schema.
-     */
-    structuredOutputs?: boolean;
-}
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -38,18 +30,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
 }
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-    Override the maximum number of embeddings per call.
-     */
-    maxEmbeddingsPerCall?: number;
-    /**
-    Override the parallelism of embedding calls.
-     */
-    supportsParallelCalls?: boolean;
-}
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 interface OpenAIImageSettings {
     /**
     Override the maximum number of images per call (default is dependent on the
@@ -86,16 +68,16 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 
 interface OpenAIProvider extends ProviderV2 {
     (modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
-    (modelId: OpenAIChatModelId
+    (modelId: OpenAIChatModelId): LanguageModelV2;
     /**
     Creates an OpenAI model for text generation.
      */
     languageModel(modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
-    languageModel(modelId: OpenAIChatModelId
+    languageModel(modelId: OpenAIChatModelId): LanguageModelV2;
    /**
     Creates an OpenAI chat model for text generation.
      */
-    chat(modelId: OpenAIChatModelId
+    chat(modelId: OpenAIChatModelId): LanguageModelV2;
     /**
     Creates an OpenAI responses API model for text generation.
      */
@@ -107,17 +89,17 @@ interface OpenAIProvider extends ProviderV2 {
     /**
     Creates a model for text embeddings.
      */
-    embedding(modelId: OpenAIEmbeddingModelId
+    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for text embeddings.
 
     @deprecated Use `textEmbeddingModel` instead.
      */
-    textEmbedding(modelId: OpenAIEmbeddingModelId
+    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for text embeddings.
      */
-    textEmbeddingModel(modelId: OpenAIEmbeddingModelId
+    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for image generation.
      */
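The typings above also widen `OpenAIImageModelId` with `'gpt-image-1'`. A hedged usage sketch, assuming the `experimental_generateImage` helper exported by the `ai` canary package:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'gpt-image-1' is now an accepted OpenAIImageModelId alongside dall-e-3 and dall-e-2.
const { image } = await generateImage({
  model: openai.image('gpt-image-1'),
  prompt: 'A low-poly render of a lighthouse at dusk',
  size: '1024x1024',
});

console.log(image.base64.length); // base64-encoded image payload
```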
package/dist/index.d.ts
CHANGED
@@ -3,14 +3,6 @@ import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
-interface OpenAIChatSettings {
-    /**
-    Whether to use structured outputs. Defaults to false.
-
-    When enabled, tool calls and object generation will be strict and follow the provided schema.
-     */
-    structuredOutputs?: boolean;
-}
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -38,18 +30,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
 }
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-    Override the maximum number of embeddings per call.
-     */
-    maxEmbeddingsPerCall?: number;
-    /**
-    Override the parallelism of embedding calls.
-     */
-    supportsParallelCalls?: boolean;
-}
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 interface OpenAIImageSettings {
     /**
     Override the maximum number of images per call (default is dependent on the
@@ -86,16 +68,16 @@ type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string &
 
 interface OpenAIProvider extends ProviderV2 {
     (modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
-    (modelId: OpenAIChatModelId
+    (modelId: OpenAIChatModelId): LanguageModelV2;
     /**
     Creates an OpenAI model for text generation.
      */
     languageModel(modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
-    languageModel(modelId: OpenAIChatModelId
+    languageModel(modelId: OpenAIChatModelId): LanguageModelV2;
    /**
     Creates an OpenAI chat model for text generation.
      */
-    chat(modelId: OpenAIChatModelId
+    chat(modelId: OpenAIChatModelId): LanguageModelV2;
     /**
     Creates an OpenAI responses API model for text generation.
      */
@@ -107,17 +89,17 @@ interface OpenAIProvider extends ProviderV2 {
     /**
     Creates a model for text embeddings.
      */
-    embedding(modelId: OpenAIEmbeddingModelId
+    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for text embeddings.
 
     @deprecated Use `textEmbeddingModel` instead.
      */
-    textEmbedding(modelId: OpenAIEmbeddingModelId
+    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for text embeddings.
      */
-    textEmbeddingModel(modelId: OpenAIEmbeddingModelId
+    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
     Creates a model for image generation.
      */
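`OpenAIEmbeddingSettings` (with `maxEmbeddingsPerCall` and `supportsParallelCalls`) is removed from both declaration files, so embedding models are now created from the model id alone. A minimal sketch, assuming the `embedMany` helper from the `ai` canary package:

```ts
import { embedMany } from 'ai';
import { openai } from '@ai-sdk/openai';

// canary.13: embedding() takes only the model id; the former settings object
// (maxEmbeddingsPerCall, supportsParallelCalls) was removed by changeset 4617fab.
const { embeddings } = await embedMany({
  model: openai.embedding('text-embedding-3-small'),
  values: ['sunny day at the beach', 'rainy afternoon in the city'],
});

console.log(embeddings.length); // one embedding vector per input value
```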