@zenning/openai 2.0.29 → 2.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +148 -0
- package/dist/index.d.mts +164 -43
- package/dist/index.d.ts +164 -43
- package/dist/index.js +1120 -784
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +1067 -730
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +352 -36
- package/dist/internal/index.d.ts +352 -36
- package/dist/internal/index.js +1075 -753
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +1008 -697
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +6 -5
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,153 @@
 # @ai-sdk/openai
 
+## 2.1.0-beta.11
+
+### Patch Changes
+
+- 0adc679: feat(provider): shared spec v3
+- 2b0caef: feat(provider/openai): preview image generation results
+- Updated dependencies [0adc679]
+- Updated dependencies [2b0caef]
+  - @ai-sdk/provider-utils@3.1.0-beta.6
+  - @ai-sdk/provider@2.1.0-beta.4
+
+## 2.1.0-beta.10
+
+### Patch Changes
+
+- d64ece9: enables image_generation capabilities in the Azure provider through the Responses API.
+
+## 2.1.0-beta.9
+
+### Patch Changes
+
+- 9a51b92: support OPENAI_BASE_URL env
+
+## 2.1.0-beta.8
+
+### Patch Changes
+
+- 4122d2a: feat(provider/openai): add gpt-5-codex model id
+- 3997a42: feat(provider/openai): local shell tool
+- cb4d238: The built in Code Interpreter tool input code is streamed in `tool-input-<start/delta/end>` chunks.
+
+## 2.1.0-beta.7
+
+### Patch Changes
+
+- 77f2b20: enables code_interpreter and file_search capabilities in the Azure provider through the Responses API
+- 8dac895: feat: `LanguageModelV3`
+- 10c1322: fix: moved dependency `@ai-sdk/test-server` to devDependencies
+- Updated dependencies [8dac895]
+  - @ai-sdk/provider-utils@3.1.0-beta.5
+  - @ai-sdk/provider@2.1.0-beta.3
+
+## 2.1.0-beta.6
+
+### Patch Changes
+
+- fe49278: feat(provider/openai): only send item references for reasoning when store: true
+
+## 2.1.0-beta.5
+
+### Patch Changes
+
+- 4616b86: chore: update zod peer depenedency version
+- Updated dependencies [4616b86]
+  - @ai-sdk/provider-utils@3.1.0-beta.4
+
+## 2.1.0-beta.4
+
+### Patch Changes
+
+- ed329cb: feat: `Provider-V3`
+- 522f6b8: feat: `ImageModelV3`
+- Updated dependencies [ed329cb]
+- Updated dependencies [522f6b8]
+  - @ai-sdk/provider@2.1.0-beta.2
+  - @ai-sdk/provider-utils@3.1.0-beta.3
+
+## 2.1.0-beta.3
+
+### Patch Changes
+
+- 2e86082: feat(provider/openai): `OpenAIChatLanguageModelOptions` type
+
+  ```ts
+  import { openai, type OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';
+  import { generateText } from 'ai';
+
+  await generateText({
+    model: openai.chat('gpt-4o'),
+    prompt: 'Invent a new holiday and describe its traditions.',
+    providerOptions: {
+      openai: {
+        user: 'user-123',
+      } satisfies OpenAIChatLanguageModelOptions,
+    },
+  });
+  ```
+
+## 2.1.0-beta.2
+
+### Patch Changes
+
+- 4920119: fix the "incomplete_details" key from nullable to nullish for openai compatibility
+- 0c4822d: feat: `EmbeddingModelV3`
+- 1cad0ab: feat: add provider version to user-agent header
+- Updated dependencies [0c4822d]
+  - @ai-sdk/provider@2.1.0-beta.1
+  - @ai-sdk/provider-utils@3.1.0-beta.2
+
+## 2.1.0-beta.1
+
+### Patch Changes
+
+- Updated dependencies [953d0f2]
+- Updated dependencies [cbb1d35]
+  - @ai-sdk/test-server@1.0.0-beta.0
+  - @ai-sdk/provider-utils@3.1.0-beta.1
+
+## 2.1.0-beta.0
+
+### Minor Changes
+
+- 78928cb: release: start 5.1 beta
+
+### Patch Changes
+
+- Updated dependencies [78928cb]
+  - @ai-sdk/provider@2.1.0-beta.0
+  - @ai-sdk/provider-utils@3.1.0-beta.0
+
+## 2.0.32
+
+### Patch Changes
+
+- 1cf857d: fix(provider/openai): remove provider-executed tools from chat completions model
+- 01de47f: feat(provider/openai): rework file search tool
+
+## 2.0.31
+
+### Patch Changes
+
+- bb94467: feat(provider/openai): add maxToolCalls provider option
+- 4a2b70e: feat(provider/openai): send item references for provider-executed tool results
+- 643711d: feat (provider/openai): provider defined image generation tool support
+
+## 2.0.30
+
+### Patch Changes
+
+- Updated dependencies [0294b58]
+  - @ai-sdk/provider-utils@3.0.9
+
+## 2.0.29
+
+### Patch Changes
+
+- 4235eb3: feat(provider/openai): code interpreter tool calls and results
+
 ## 2.0.28
 
 ### Patch Changes
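Taken together, the Responses API entries above (the `maxToolCalls` provider option in 2.0.31, `store`-gated reasoning item references in 2.1.0-beta.6, and the `gpt-5-codex` model id in 2.1.0-beta.8) correspond to provider options that also appear in the type diff below. A minimal sketch of how they would be passed, assuming the same `generateText` / `providerOptions` call shape as the changelog's own 2.1.0-beta.3 example (the import path uses this package's `@zenning/openai` name, while the quoted changelog still refers to `@ai-sdk/openai`):

```ts
import { generateText } from 'ai';
import { openai } from '@zenning/openai';

// Sketch only: the option names and model id are taken from the changelog and
// the openaiResponsesProviderOptionsSchema diff; runtime behavior is not shown
// in this package diff.
const { text } = await generateText({
  model: openai.responses('gpt-5-codex'), // model id added in 2.1.0-beta.8
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      store: true, // reasoning item references are only sent when store: true (2.1.0-beta.6)
      maxToolCalls: 5, // provider option added in 2.0.31
    },
  },
});
console.log(text);
```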
package/dist/index.d.mts
CHANGED
@@ -1,9 +1,40 @@
-import {
-import * as
-import { FetchFunction } from '@
+import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, TranscriptionModelV2, SpeechModelV2 } from '@zenning/provider';
+import * as _zenning_provider_utils from '@zenning/provider-utils';
+import { FetchFunction } from '@zenning/provider-utils';
 import { z } from 'zod/v4';
 
 type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+declare const openaiChatLanguageModelOptions: z.ZodObject<{
+    logitBias: z.ZodOptional<z.ZodRecord<z.ZodCoercedNumber<string>, z.ZodNumber>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+    parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
+    user: z.ZodOptional<z.ZodString>;
+    reasoningEffort: z.ZodOptional<z.ZodEnum<{
+        minimal: "minimal";
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
+    maxCompletionTokens: z.ZodOptional<z.ZodNumber>;
+    store: z.ZodOptional<z.ZodBoolean>;
+    metadata: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodString>>;
+    prediction: z.ZodOptional<z.ZodRecord<z.ZodString, z.ZodAny>>;
+    structuredOutputs: z.ZodOptional<z.ZodBoolean>;
+    serviceTier: z.ZodOptional<z.ZodEnum<{
+        auto: "auto";
+        flex: "flex";
+        priority: "priority";
+    }>>;
+    strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+    textVerbosity: z.ZodOptional<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
+    promptCacheKey: z.ZodOptional<z.ZodString>;
+    safetyIdentifier: z.ZodOptional<z.ZodString>;
+}, z.core.$strip>;
+type OpenAIChatLanguageModelOptions = z.infer<typeof openaiChatLanguageModelOptions>;
 
 type OpenAICompletionModelId = 'gpt-3.5-turbo-instruct' | (string & {});
 
@@ -11,7 +42,7 @@ type OpenAIEmbeddingModelId = 'text-embedding-3-small' | 'text-embedding-3-large
 
 type OpenAIImageModelId = 'gpt-image-1' | 'dall-e-3' | 'dall-e-2' | (string & {});
 
-declare const webSearchToolFactory:
+declare const webSearchToolFactory: _zenning_provider_utils.ProviderDefinedToolFactory<{}, {
     /**
     * Filters for the search.
     */
@@ -57,6 +88,37 @@ declare const webSearchToolFactory: _ai_sdk_provider_utils.ProviderDefinedToolFa
     };
 }>;
 
+/**
+ * A filter used to compare a specified attribute key to a given value using a defined comparison operation.
+ */
+type OpenAIResponsesFileSearchToolComparisonFilter = {
+    /**
+     * The key to compare against the value.
+     */
+    key: string;
+    /**
+     * Specifies the comparison operator: eq, ne, gt, gte, lt, lte.
+     */
+    type: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte';
+    /**
+     * The value to compare against the attribute key; supports string, number, or boolean types.
+     */
+    value: string | number | boolean;
+};
+/**
+ * Combine multiple filters using and or or.
+ */
+type OpenAIResponsesFileSearchToolCompoundFilter = {
+    /**
+     * Type of operation: and or or.
+     */
+    type: 'and' | 'or';
+    /**
+     * Array of filters to combine. Items can be ComparisonFilter or CompoundFilter.
+     */
+    filters: Array<OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter>;
+};
+
 declare const openaiTools: {
     /**
     * The Code Interpreter tool allows models to write and run Python code in a
@@ -71,7 +133,7 @@ declare const openaiTools: {
         container?: string | {
             fileIds?: string[];
         };
-    }) =>
+    }) => _zenning_provider_utils.Tool<{
         code?: string | null;
         containerId: string;
     }, {
@@ -95,23 +157,79 @@ declare const openaiTools: {
     * @param ranking - The ranking options to use for the file search.
     * @param filters - The filters to use for the file search.
     */
-    fileSearch:
-
+    fileSearch: _zenning_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{}, {
+        queries: string[];
+        results: null | {
+            attributes: Record<string, unknown>;
+            fileId: string;
+            filename: string;
+            score: number;
+            text: string;
+        }[];
     }, {
-        vectorStoreIds
+        vectorStoreIds: string[];
        maxNumResults?: number;
        ranking?: {
-            ranker?:
+            ranker?: string;
+            scoreThreshold?: number;
        };
-        filters?:
-
-
-
-
-
-
+        filters?: OpenAIResponsesFileSearchToolComparisonFilter | OpenAIResponsesFileSearchToolCompoundFilter;
+    }>;
+    /**
+    * The image generation tool allows you to generate images using a text prompt,
+    * and optionally image inputs. It leverages the GPT Image model,
+    * and automatically optimizes text inputs for improved performance.
+    *
+    * Must have name `image_generation`.
+    *
+    * @param background - Background type for the generated image. One of 'auto', 'opaque', or 'transparent'.
+    * @param inputFidelity - Input fidelity for the generated image. One of 'low' or 'high'.
+    * @param inputImageMask - Optional mask for inpainting. Contains fileId and/or imageUrl.
+    * @param model - The image generation model to use. Default: gpt-image-1.
+    * @param moderation - Moderation level for the generated image. Default: 'auto'.
+    * @param outputCompression - Compression level for the output image (0-100).
+    * @param outputFormat - The output format of the generated image. One of 'png', 'jpeg', or 'webp'.
+    * @param partialImages - Number of partial images to generate in streaming mode (0-3).
+    * @param quality - The quality of the generated image. One of 'auto', 'low', 'medium', or 'high'.
+    * @param size - The size of the generated image. One of 'auto', '1024x1024', '1024x1536', or '1536x1024'.
+    */
+    imageGeneration: (args?: {
+        background?: "auto" | "opaque" | "transparent";
+        inputFidelity?: "low" | "high";
+        inputImageMask?: {
+            fileId?: string;
+            imageUrl?: string;
        };
+        model?: string;
+        moderation?: "auto";
+        outputCompression?: number;
+        outputFormat?: "png" | "jpeg" | "webp";
+        partialImages?: number;
+        quality?: "auto" | "low" | "medium" | "high";
+        size?: "auto" | "1024x1024" | "1024x1536" | "1536x1024";
+    }) => _zenning_provider_utils.Tool<{}, {
+        result: string;
    }>;
+    /**
+    * Local shell is a tool that allows agents to run shell commands locally
+    * on a machine you or the user provides.
+    *
+    * Supported models: `gpt-5-codex` and `codex-mini-latest`
+    *
+    * Must have name `local_shell`.
+    */
+    localShell: _zenning_provider_utils.ProviderDefinedToolFactoryWithOutputSchema<{
+        action: {
+            type: "exec";
+            command: string[];
+            timeoutMs?: number;
+            user?: string;
+            workingDirectory?: string;
+            env?: Record<string, string>;
+        };
+    }, {
+        output: string;
+    }, {}>;
     /**
     * Web search allows models to access up-to-date information from the internet
     * and provide answers with sourced citations.
@@ -123,7 +241,7 @@ declare const openaiTools: {
     *
     * @deprecated Use `webSearch` instead.
     */
-    webSearchPreview:
+    webSearchPreview: _zenning_provider_utils.ProviderDefinedToolFactory<{}, {
        searchContextSize?: "low" | "medium" | "high";
        userLocation?: {
            type: "approximate";
@@ -143,53 +261,53 @@ declare const openaiTools: {
     * @param searchContextSize - The search context size to use for the web search.
     * @param userLocation - The user location to use for the web search.
     */
-    webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) =>
+    webSearch: (args?: Parameters<typeof webSearchToolFactory>[0]) => _zenning_provider_utils.Tool<{}, unknown>;
 };
 
-type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
+type OpenAIResponsesModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | (string & {});
 
 type OpenAISpeechModelId = 'tts-1' | 'tts-1-hd' | 'gpt-4o-mini-tts' | (string & {});
 
 type OpenAITranscriptionModelId = 'whisper-1' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe' | (string & {});
 
-interface OpenAIProvider extends
-(modelId: OpenAIResponsesModelId):
+interface OpenAIProvider extends ProviderV3 {
+    (modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI model for text generation.
     */
-    languageModel(modelId: OpenAIResponsesModelId):
+    languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI chat model for text generation.
     */
-    chat(modelId: OpenAIChatModelId):
+    chat(modelId: OpenAIChatModelId): LanguageModelV3;
     /**
     Creates an OpenAI responses API model for text generation.
     */
-    responses(modelId: OpenAIResponsesModelId):
+    responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
     /**
     Creates an OpenAI completion model for text generation.
     */
-    completion(modelId: OpenAICompletionModelId):
+    completion(modelId: OpenAICompletionModelId): LanguageModelV3;
     /**
     Creates a model for text embeddings.
     */
-    embedding(modelId: OpenAIEmbeddingModelId):
+    embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
     /**
     Creates a model for text embeddings.
     */
-    textEmbedding(modelId: OpenAIEmbeddingModelId):
+    textEmbedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
     /**
     Creates a model for text embeddings.
     */
-    textEmbeddingModel(modelId: OpenAIEmbeddingModelId):
+    textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3<string>;
     /**
     Creates a model for image generation.
     */
-    image(modelId: OpenAIImageModelId):
+    image(modelId: OpenAIImageModelId): ImageModelV3;
     /**
     Creates a model for image generation.
     */
-    imageModel(modelId: OpenAIImageModelId):
+    imageModel(modelId: OpenAIImageModelId): ImageModelV3;
     /**
     Creates a model for transcription.
     */
@@ -244,34 +362,37 @@ Default OpenAI provider instance.
 declare const openai: OpenAIProvider;
 
 declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
+    include: z.ZodOptional<z.ZodNullable<z.ZodArray<z.ZodEnum<{
+        "file_search_call.results": "file_search_call.results";
+        "message.output_text.logprobs": "message.output_text.logprobs";
+        "reasoning.encrypted_content": "reasoning.encrypted_content";
+    }>>>>;
+    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+    maxToolCalls: z.ZodOptional<z.ZodNullable<z.ZodNumber>>;
     metadata: z.ZodOptional<z.ZodNullable<z.ZodAny>>;
     parallelToolCalls: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     previousResponseId: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-
-    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    promptCacheKey: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningEffort: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
-    instructions: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     reasoningSummary: z.ZodOptional<z.ZodNullable<z.ZodString>>;
+    safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
     serviceTier: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
        auto: "auto";
        flex: "flex";
        priority: "priority";
    }>>>;
-
-
-        "message.output_text.logprobs": "message.output_text.logprobs";
-        "reasoning.encrypted_content": "reasoning.encrypted_content";
-    }>>>>;
+    store: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
+    strictJsonSchema: z.ZodOptional<z.ZodNullable<z.ZodBoolean>>;
     textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
        low: "low";
        medium: "medium";
        high: "high";
    }>>>;
-
-    safetyIdentifier: z.ZodOptional<z.ZodNullable<z.ZodString>>;
-    logprobs: z.ZodOptional<z.ZodUnion<readonly [z.ZodBoolean, z.ZodNumber]>>;
+    user: z.ZodOptional<z.ZodNullable<z.ZodString>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
-
+declare const VERSION: string;
+
+export { type OpenAIChatLanguageModelOptions, type OpenAIProvider, type OpenAIProviderSettings, type OpenAIResponsesProviderOptions, VERSION, createOpenAI, openai };
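The reworked `fileSearch` factory and the new `OpenAIResponsesFileSearchToolComparisonFilter` / `OpenAIResponsesFileSearchToolCompoundFilter` types above suggest usage along the following lines. This is a hedged sketch: it assumes the tool factories declared as `openaiTools` are exposed on the provider as `openai.tools` and registered under the `file_search` key (following the "Must have name" convention in the doc comments above), and the vector store id is a placeholder.

```ts
import { generateText } from 'ai';
import { openai } from '@zenning/openai';

// Placeholder vector store id; the filter object follows the
// ComparisonFilter / CompoundFilter shapes from the .d.mts diff above.
const fileSearch = openai.tools.fileSearch({
  vectorStoreIds: ['vs_example'],
  maxNumResults: 5,
  filters: {
    type: 'and',
    filters: [
      { key: 'category', type: 'eq', value: 'release-notes' },
      { key: 'year', type: 'gte', value: 2024 },
    ],
  },
});

await generateText({
  model: openai.responses('gpt-5'),
  prompt: 'What changed in the most recent release?',
  tools: { file_search: fileSearch },
});
```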