@ai-sdk/openai 2.0.0-canary.11 → 2.0.0-canary.13
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in the public registry.
- package/CHANGELOG.md +28 -0
- package/dist/index.d.mts +15 -63
- package/dist/index.d.ts +15 -63
- package/dist/index.js +299 -240
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +304 -244
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +51 -57
- package/dist/internal/index.d.ts +51 -57
- package/dist/internal/index.js +295 -227
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +297 -231
- package/dist/internal/index.mjs.map +1 -1
- package/internal.d.ts +1 -0
- package/package.json +5 -4
package/CHANGELOG.md
CHANGED

@@ -1,5 +1,33 @@
 # @ai-sdk/openai
 
+## 2.0.0-canary.13
+
+### Patch Changes
+
+- 177526b: chore(providers/openai-transcription): switch to providerOptions
+- c15dfbf: feat (providers/openai): add gpt-image-1 model id to image settings
+- 9bf7291: chore(providers/openai): enable structuredOutputs by default & switch to provider option
+- 4617fab: chore(embedding-models): remove remaining settings
+- Updated dependencies [9bf7291]
+- Updated dependencies [4617fab]
+- Updated dependencies [e030615]
+  - @ai-sdk/provider@2.0.0-canary.11
+  - @ai-sdk/provider-utils@3.0.0-canary.12
+
+## 2.0.0-canary.12
+
+### Patch Changes
+
+- db72adc: chore(providers/openai): update completion model to use providerOptions
+- 66962ed: fix(packages): export node10 compatible types
+- 9301f86: refactor (image-model): rename `ImageModelV1` to `ImageModelV2`
+- 7df7a25: feat (providers/openai): support gpt-image-1 image generation
+- Updated dependencies [66962ed]
+- Updated dependencies [9301f86]
+- Updated dependencies [a3f768e]
+  - @ai-sdk/provider-utils@3.0.0-canary.11
+  - @ai-sdk/provider@2.0.0-canary.10
+
 ## 2.0.0-canary.11
 
 ### Patch Changes
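The common theme across both releases is the retirement of per-model settings objects in favor of per-call `providerOptions`, with `structuredOutputs` now enabled by default. A minimal migration sketch, assuming the standard AI SDK v5 canary call shape; the `structuredOutputs` spelling mirrors the removed setting, so treat the exact `providerOptions` shape as an inference from this diff rather than documented API:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  // Before these canaries: openai('gpt-4o', { structuredOutputs: true })
  model: openai('gpt-4o'),
  prompt: 'Summarize this changelog in one sentence.',
  // After: model settings travel as provider options, keyed by provider name.
  providerOptions: {
    openai: { structuredOutputs: false }, // now defaults to true; opt out per call
  },
});
```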
package/dist/index.d.mts
CHANGED

@@ -1,48 +1,10 @@
-import { LanguageModelV2, ProviderV2, EmbeddingModelV2,
+import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
-interface OpenAIChatSettings {
-    /**
-Whether to use structured outputs. Defaults to false.
-
-When enabled, tool calls and object generation will be strict and follow the provided schema.
-    */
-    structuredOutputs?: boolean;
-}
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
-interface OpenAICompletionSettings {
-    /**
-Echo back the prompt in addition to the completion.
-    */
-    echo?: boolean;
-    /**
-Modify the likelihood of specified tokens appearing in the completion.
-
-Accepts a JSON object that maps tokens (specified by their token ID in
-the GPT tokenizer) to an associated bias value from -100 to 100. You
-can use this tokenizer tool to convert text to token IDs. Mathematically,
-the bias is added to the logits generated by the model prior to sampling.
-The exact effect will vary per model, but values between -1 and 1 should
-decrease or increase likelihood of selection; values like -100 or 100
-should result in a ban or exclusive selection of the relevant token.
-
-As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-token from being generated.
-    */
-    logitBias?: Record<number, number>;
-    /**
-The suffix that comes after a completion of inserted text.
-    */
-    suffix?: string;
-    /**
-A unique identifier representing your end-user, which can help OpenAI to
-monitor and detect abuse. Learn more.
-    */
-    user?: string;
-}
 
 type OpenAICompletionConfig = {
     provider: string;
@@ -57,9 +19,9 @@ type OpenAICompletionConfig = {
 declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAICompletionModelId;
-    readonly settings: OpenAICompletionSettings;
     private readonly config;
-
+    private get providerOptionsName();
+    constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
     get provider(): string;
     getSupportedUrls(): Promise<Record<string, RegExp[]>>;
     private getArgs;
@@ -68,18 +30,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
 }
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-Override the maximum number of embeddings per call.
-    */
-    maxEmbeddingsPerCall?: number;
-    /**
-Override the parallelism of embedding calls.
-    */
-    supportsParallelCalls?: boolean;
-}
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 interface OpenAIImageSettings {
     /**
 Override the maximum number of images per call (default is dependent on the
@@ -115,17 +67,17 @@ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
 interface OpenAIProvider extends ProviderV2 {
-    (modelId: 'gpt-3.5-turbo-instruct'
-    (modelId: OpenAIChatModelId
+    (modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
+    (modelId: OpenAIChatModelId): LanguageModelV2;
     /**
 Creates an OpenAI model for text generation.
     */
-    languageModel(modelId: 'gpt-3.5-turbo-instruct'
-    languageModel(modelId: OpenAIChatModelId
+    languageModel(modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
+    languageModel(modelId: OpenAIChatModelId): LanguageModelV2;
     /**
 Creates an OpenAI chat model for text generation.
     */
-    chat(modelId: OpenAIChatModelId
+    chat(modelId: OpenAIChatModelId): LanguageModelV2;
     /**
 Creates an OpenAI responses API model for text generation.
     */
@@ -133,29 +85,29 @@ interface OpenAIProvider extends ProviderV2 {
     /**
 Creates an OpenAI completion model for text generation.
     */
-    completion(modelId: OpenAICompletionModelId
+    completion(modelId: OpenAICompletionModelId): LanguageModelV2;
     /**
 Creates a model for text embeddings.
     */
-    embedding(modelId: OpenAIEmbeddingModelId
+    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
 Creates a model for text embeddings.
 
 @deprecated Use `textEmbeddingModel` instead.
     */
-    textEmbedding(modelId: OpenAIEmbeddingModelId
+    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
 Creates a model for text embeddings.
     */
-    textEmbeddingModel(modelId: OpenAIEmbeddingModelId
+    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
 Creates a model for image generation.
     */
-    image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings):
+    image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
     /**
 Creates a model for image generation.
     */
-    imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings):
+    imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
     /**
 Creates a model for transcription.
     */
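Callers lose `OpenAIChatSettings`, `OpenAICompletionSettings`, and `OpenAIEmbeddingSettings` here, and `OpenAICompletionLanguageModel` gains a private `providerOptionsName` getter. Together with the db72adc changelog entry, that suggests the completion flags (`echo`, `logitBias`, `suffix`, `user`) are now read per call from `providerOptions`. A hedged sketch of that shape; the field names are copied from the removed interface, but their placement under `providerOptions.openai` is an inference from this diff:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.completion('gpt-3.5-turbo-instruct'),
  prompt: 'Say hello.',
  providerOptions: {
    openai: {
      echo: false,
      logitBias: { 50256: -100 }, // discourage the <|endoftext|> token
      user: 'user-1234', // end-user id for abuse monitoring
    },
  },
});
```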
package/dist/index.d.ts
CHANGED

@@ -1,48 +1,10 @@
-import { LanguageModelV2, ProviderV2, EmbeddingModelV2,
+import { LanguageModelV2, ProviderV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV1, SpeechModelV1 } from '@ai-sdk/provider';
 import { FetchFunction } from '@ai-sdk/provider-utils';
 import { z } from 'zod';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024-09-12' | 'o1-preview' | 'o1-preview-2024-09-12' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'o4-mini' | 'o4-mini-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-audio-preview' | 'gpt-4o-audio-preview-2024-10-01' | 'gpt-4o-audio-preview-2024-12-17' | 'gpt-4o-search-preview' | 'gpt-4o-search-preview-2025-03-11' | 'gpt-4o-mini-search-preview' | 'gpt-4o-mini-search-preview-2025-03-11' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo-preview' | 'gpt-4-0125-preview' | 'gpt-4-1106-preview' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
-interface OpenAIChatSettings {
-    /**
-Whether to use structured outputs. Defaults to false.
-
-When enabled, tool calls and object generation will be strict and follow the provided schema.
-    */
-    structuredOutputs?: boolean;
-}
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
-interface OpenAICompletionSettings {
-    /**
-Echo back the prompt in addition to the completion.
-    */
-    echo?: boolean;
-    /**
-Modify the likelihood of specified tokens appearing in the completion.
-
-Accepts a JSON object that maps tokens (specified by their token ID in
-the GPT tokenizer) to an associated bias value from -100 to 100. You
-can use this tokenizer tool to convert text to token IDs. Mathematically,
-the bias is added to the logits generated by the model prior to sampling.
-The exact effect will vary per model, but values between -1 and 1 should
-decrease or increase likelihood of selection; values like -100 or 100
-should result in a ban or exclusive selection of the relevant token.
-
-As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
-token from being generated.
-    */
-    logitBias?: Record<number, number>;
-    /**
-The suffix that comes after a completion of inserted text.
-    */
-    suffix?: string;
-    /**
-A unique identifier representing your end-user, which can help OpenAI to
-monitor and detect abuse. Learn more.
-    */
-    user?: string;
-}
 
 type OpenAICompletionConfig = {
     provider: string;
@@ -57,9 +19,9 @@ type OpenAICompletionConfig = {
 declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
     readonly specificationVersion = "v2";
     readonly modelId: OpenAICompletionModelId;
-    readonly settings: OpenAICompletionSettings;
     private readonly config;
-
+    private get providerOptionsName();
+    constructor(modelId: OpenAICompletionModelId, config: OpenAICompletionConfig);
     get provider(): string;
     getSupportedUrls(): Promise<Record<string, RegExp[]>>;
     private getArgs;
@@ -68,18 +30,8 @@ declare class OpenAICompletionLanguageModel implements LanguageModelV2 {
 }
 
 type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large' | 'text-embedding-ada-002' | (string & {});
-interface OpenAIEmbeddingSettings {
-    /**
-Override the maximum number of embeddings per call.
-    */
-    maxEmbeddingsPerCall?: number;
-    /**
-Override the parallelism of embedding calls.
-    */
-    supportsParallelCalls?: boolean;
-}
 
-type OpenAIImageModelId = 'dall-e-3' | 'dall-e-2' | (string & {});
+type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 interface OpenAIImageSettings {
     /**
 Override the maximum number of images per call (default is dependent on the
@@ -115,17 +67,17 @@ type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o1-mini' | 'o1-mini-2024
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
 interface OpenAIProvider extends ProviderV2 {
-    (modelId: 'gpt-3.5-turbo-instruct'
-    (modelId: OpenAIChatModelId
+    (modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
+    (modelId: OpenAIChatModelId): LanguageModelV2;
     /**
 Creates an OpenAI model for text generation.
    */
-    languageModel(modelId: 'gpt-3.5-turbo-instruct'
-    languageModel(modelId: OpenAIChatModelId
+    languageModel(modelId: 'gpt-3.5-turbo-instruct'): OpenAICompletionLanguageModel;
+    languageModel(modelId: OpenAIChatModelId): LanguageModelV2;
     /**
 Creates an OpenAI chat model for text generation.
    */
-    chat(modelId: OpenAIChatModelId
+    chat(modelId: OpenAIChatModelId): LanguageModelV2;
     /**
 Creates an OpenAI responses API model for text generation.
    */
@@ -133,29 +85,29 @@ interface OpenAIProvider extends ProviderV2 {
     /**
 Creates an OpenAI completion model for text generation.
    */
-    completion(modelId: OpenAICompletionModelId
+    completion(modelId: OpenAICompletionModelId): LanguageModelV2;
     /**
 Creates a model for text embeddings.
    */
-    embedding(modelId: OpenAIEmbeddingModelId
+    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
 Creates a model for text embeddings.
 
 @deprecated Use `textEmbeddingModel` instead.
    */
-    textEmbedding(modelId: OpenAIEmbeddingModelId
+    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
 Creates a model for text embeddings.
    */
-    textEmbeddingModel(modelId: OpenAIEmbeddingModelId
+    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV2<string>;
     /**
 Creates a model for image generation.
    */
-    image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings):
+    image(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
     /**
 Creates a model for image generation.
    */
-    imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings):
+    imageModel(modelId: OpenAIImageModelId, settings?: OpenAIImageSettings): ImageModelV2;
     /**
 Creates a model for transcription.
    */
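The other user-visible change in these declarations is on the image side: `gpt-image-1` joins the typed `OpenAIImageModelId` union, and `image()`/`imageModel()` now return `ImageModelV2` (the `ImageModelV1` to `ImageModelV2` rename from canary.12). A short sketch of what that enables, assuming the matching `ai` canary's `experimental_generateImage` accepts the V2 image models:

```ts
import { experimental_generateImage as generateImage } from 'ai';
import { openai } from '@ai-sdk/openai';

const { image } = await generateImage({
  model: openai.image('gpt-image-1'), // a typed model id as of these canaries
  prompt: 'A watercolor lighthouse at dawn',
});

console.log(image.base64.slice(0, 32)); // base64-encoded image payload
```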