@ai-sdk/openai 3.0.19 → 3.0.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +13 -0
- package/dist/index.d.mts +27 -27
- package/dist/index.d.ts +27 -27
- package/dist/index.js +32 -32
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +32 -32
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +31 -31
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +31 -31
- package/dist/internal/index.mjs.map +1 -1
- package/docs/03-openai.mdx +65 -6
- package/package.json +2 -2
- package/src/completion/openai-completion-options.ts +28 -28
- package/src/embedding/openai-embedding-options.ts +6 -6
- package/src/openai-provider.ts +27 -27
package/docs/03-openai.mdx
CHANGED
|
@@ -186,6 +186,9 @@ The following provider options are available:
|
|
|
186
186
|
They can be used to change the system or developer message when continuing a conversation using the `previousResponseId` option.
|
|
187
187
|
Defaults to `undefined`.
|
|
188
188
|
|
|
189
|
+
- **logprobs** _boolean | number_
|
|
190
|
+
Return the log probabilities of the tokens. Including logprobs will increase the response size and can slow down response times. However, it can be useful to better understand how the model is behaving. Setting to `true` returns the log probabilities of the tokens that were generated. Setting to a number (1-20) returns the log probabilities of the top n tokens that were generated.
|
|
191
|
+
|
|
189
192
|
- **user** _string_
|
|
190
193
|
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Defaults to `undefined`.
|
|
191
194
|
|
|
@@ -374,6 +377,9 @@ const result = await generateText({
|
|
|
374
377
|
city: 'San Francisco',
|
|
375
378
|
region: 'California',
|
|
376
379
|
},
|
|
380
|
+
filters: {
|
|
381
|
+
allowedDomains: ['sfchronicle.com', 'sfgate.com'],
|
|
382
|
+
},
|
|
377
383
|
}),
|
|
378
384
|
},
|
|
379
385
|
// Force web search tool (optional):
|
|
@@ -393,6 +399,14 @@ for (const toolResult of result.toolResults) {
|
|
|
393
399
|
}
|
|
394
400
|
```
|
|
395
401
|
|
|
402
|
+
The web search tool supports the following configuration options:
|
|
403
|
+
|
|
404
|
+
- **externalWebAccess** _boolean_ - Whether to use external web access for fetching live content. Defaults to `true`.
|
|
405
|
+
- **searchContextSize** _'low' | 'medium' | 'high'_ - Controls the amount of context used for the search. Higher values provide more comprehensive results but may have higher latency and cost.
|
|
406
|
+
- **userLocation** - Optional location information to provide geographically relevant results. Includes `type` (always `'approximate'`), `country`, `city`, `region`, and `timezone`.
|
|
407
|
+
- **filters** - Optional filter configuration to restrict search results.
|
|
408
|
+
- **allowedDomains** _string[]_ - Array of allowed domains for the search. Subdomains of the provided domains are automatically included.
|
|
409
|
+
|
|
396
410
|
For detailed information on configuration options see the [OpenAI Web Search Tool documentation](https://platform.openai.com/docs/guides/tools-web-search?api-mode=responses).
|
|
397
411
|
|
|
398
412
|
#### File Search Tool
|
|
@@ -430,6 +444,49 @@ const result = await generateText({
|
|
|
430
444
|
});
|
|
431
445
|
```
|
|
432
446
|
|
|
447
|
+
The file search tool supports filtering with both comparison and compound filters:
|
|
448
|
+
|
|
449
|
+
**Comparison filters** - Filter by a single attribute:
|
|
450
|
+
|
|
451
|
+
- `eq` - Equal to
|
|
452
|
+
- `ne` - Not equal to
|
|
453
|
+
- `gt` - Greater than
|
|
454
|
+
- `gte` - Greater than or equal to
|
|
455
|
+
- `lt` - Less than
|
|
456
|
+
- `lte` - Less than or equal to
|
|
457
|
+
- `in` - Value is in array
|
|
458
|
+
- `nin` - Value is not in array
|
|
459
|
+
|
|
460
|
+
```ts
|
|
461
|
+
// Single comparison filter
|
|
462
|
+
filters: { key: 'year', type: 'gte', value: 2023 }
|
|
463
|
+
|
|
464
|
+
// Filter with array values
|
|
465
|
+
filters: { key: 'status', type: 'in', value: ['published', 'reviewed'] }
|
|
466
|
+
```
|
|
467
|
+
|
|
468
|
+
**Compound filters** - Combine multiple filters with `and` or `or`:
|
|
469
|
+
|
|
470
|
+
```ts
|
|
471
|
+
// Compound filter with AND
|
|
472
|
+
filters: {
|
|
473
|
+
type: 'and',
|
|
474
|
+
filters: [
|
|
475
|
+
{ key: 'author', type: 'eq', value: 'Jane Smith' },
|
|
476
|
+
{ key: 'year', type: 'gte', value: 2023 },
|
|
477
|
+
],
|
|
478
|
+
}
|
|
479
|
+
|
|
480
|
+
// Compound filter with OR
|
|
481
|
+
filters: {
|
|
482
|
+
type: 'or',
|
|
483
|
+
filters: [
|
|
484
|
+
{ key: 'department', type: 'eq', value: 'Engineering' },
|
|
485
|
+
{ key: 'department', type: 'eq', value: 'Research' },
|
|
486
|
+
],
|
|
487
|
+
}
|
|
488
|
+
```
|
|
489
|
+
|
|
433
490
|
#### Image Generation Tool
|
|
434
491
|
|
|
435
492
|
OpenAI's Responses API supports multi-modal image generation as a provider-defined tool.
|
|
@@ -1179,6 +1236,14 @@ The following optional provider options are available for OpenAI chat models:
|
|
|
1179
1236
|
|
|
1180
1237
|
A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user.
|
|
1181
1238
|
|
|
1239
|
+
- **systemMessageMode** _'system' | 'developer' | 'remove'_
|
|
1240
|
+
|
|
1241
|
+
Override the system message mode for this model. If not specified, the mode is automatically determined based on the model. `system` uses the 'system' role for system messages (default for most models); `developer` uses the 'developer' role (used by reasoning models); `remove` removes system messages entirely.
|
|
1242
|
+
|
|
1243
|
+
- **forceReasoning** _boolean_
|
|
1244
|
+
|
|
1245
|
+
Force treating this model as a reasoning model. This is useful for "stealth" reasoning models (e.g. via a custom baseURL) where the model ID is not recognized by the SDK's allowlist. When enabled, the SDK applies reasoning-model parameter compatibility rules and defaults `systemMessageMode` to `developer` unless overridden.
|
|
1246
|
+
|
|
1182
1247
|
#### Reasoning
|
|
1183
1248
|
|
|
1184
1249
|
OpenAI has introduced the `o1`,`o3`, and `o4` series of [reasoning models](https://platform.openai.com/docs/guides/reasoning).
|
|
@@ -2053,12 +2118,6 @@ const result = await generateSpeech({
|
|
|
2053
2118
|
Does not work with `tts-1` or `tts-1-hd`.
|
|
2054
2119
|
Optional.
|
|
2055
2120
|
|
|
2056
|
-
- **response_format** _string_
|
|
2057
|
-
The format to audio in.
|
|
2058
|
-
Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`.
|
|
2059
|
-
Defaults to `mp3`.
|
|
2060
|
-
Optional.
|
|
2061
|
-
|
|
2062
2121
|
- **speed** _number_
|
|
2063
2122
|
The speed of the generated audio.
|
|
2064
2123
|
Select a value from 0.25 to 4.0.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@ai-sdk/openai",
|
|
3
|
-
"version": "3.0.19",
|
|
3
|
+
"version": "3.0.21",
|
|
4
4
|
"license": "Apache-2.0",
|
|
5
5
|
"sideEffects": false,
|
|
6
6
|
"main": "./dist/index.js",
|
|
@@ -37,7 +37,7 @@
|
|
|
37
37
|
},
|
|
38
38
|
"dependencies": {
|
|
39
39
|
"@ai-sdk/provider": "3.0.5",
|
|
40
|
-
"@ai-sdk/provider-utils": "4.0.
|
|
40
|
+
"@ai-sdk/provider-utils": "4.0.10"
|
|
41
41
|
},
|
|
42
42
|
"devDependencies": {
|
|
43
43
|
"@types/node": "20.17.24",
|
|
@@ -8,46 +8,46 @@ export const openaiCompletionProviderOptions = lazySchema(() =>
|
|
|
8
8
|
zodSchema(
|
|
9
9
|
z.object({
|
|
10
10
|
/**
|
|
11
|
-
Echo back the prompt in addition to the completion.
|
|
12
|
-
|
|
11
|
+
* Echo back the prompt in addition to the completion.
|
|
12
|
+
*/
|
|
13
13
|
echo: z.boolean().optional(),
|
|
14
14
|
|
|
15
15
|
/**
|
|
16
|
-
Modify the likelihood of specified tokens appearing in the completion.
|
|
17
|
-
|
|
18
|
-
Accepts a JSON object that maps tokens (specified by their token ID in
|
|
19
|
-
the GPT tokenizer) to an associated bias value from -100 to 100. You
|
|
20
|
-
can use this tokenizer tool to convert text to token IDs. Mathematically,
|
|
21
|
-
the bias is added to the logits generated by the model prior to sampling.
|
|
22
|
-
The exact effect will vary per model, but values between -1 and 1 should
|
|
23
|
-
decrease or increase likelihood of selection; values like -100 or 100
|
|
24
|
-
should result in a ban or exclusive selection of the relevant token.
|
|
25
|
-
|
|
26
|
-
As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
|
|
27
|
-
token from being generated.
|
|
28
|
-
|
|
16
|
+
* Modify the likelihood of specified tokens appearing in the completion.
|
|
17
|
+
*
|
|
18
|
+
* Accepts a JSON object that maps tokens (specified by their token ID in
|
|
19
|
+
* the GPT tokenizer) to an associated bias value from -100 to 100. You
|
|
20
|
+
* can use this tokenizer tool to convert text to token IDs. Mathematically,
|
|
21
|
+
* the bias is added to the logits generated by the model prior to sampling.
|
|
22
|
+
* The exact effect will vary per model, but values between -1 and 1 should
|
|
23
|
+
* decrease or increase likelihood of selection; values like -100 or 100
|
|
24
|
+
* should result in a ban or exclusive selection of the relevant token.
|
|
25
|
+
*
|
|
26
|
+
* As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
|
|
27
|
+
* token from being generated.
|
|
28
|
+
*/
|
|
29
29
|
logitBias: z.record(z.string(), z.number()).optional(),
|
|
30
30
|
|
|
31
31
|
/**
|
|
32
|
-
The suffix that comes after a completion of inserted text.
|
|
33
|
-
|
|
32
|
+
* The suffix that comes after a completion of inserted text.
|
|
33
|
+
*/
|
|
34
34
|
suffix: z.string().optional(),
|
|
35
35
|
|
|
36
36
|
/**
|
|
37
|
-
A unique identifier representing your end-user, which can help OpenAI to
|
|
38
|
-
monitor and detect abuse. Learn more.
|
|
39
|
-
|
|
37
|
+
* A unique identifier representing your end-user, which can help OpenAI to
|
|
38
|
+
* monitor and detect abuse. Learn more.
|
|
39
|
+
*/
|
|
40
40
|
user: z.string().optional(),
|
|
41
41
|
|
|
42
42
|
/**
|
|
43
|
-
Return the log probabilities of the tokens. Including logprobs will increase
|
|
44
|
-
the response size and can slow down response times. However, it can
|
|
45
|
-
be useful to better understand how the model is behaving.
|
|
46
|
-
Setting to true will return the log probabilities of the tokens that
|
|
47
|
-
were generated.
|
|
48
|
-
Setting to a number will return the log probabilities of the top n
|
|
49
|
-
tokens that were generated.
|
|
50
|
-
|
|
43
|
+
* Return the log probabilities of the tokens. Including logprobs will increase
|
|
44
|
+
* the response size and can slow down response times. However, it can
|
|
45
|
+
* be useful to better understand how the model is behaving.
|
|
46
|
+
* Setting to true will return the log probabilities of the tokens that
|
|
47
|
+
* were generated.
|
|
48
|
+
* Setting to a number will return the log probabilities of the top n
|
|
49
|
+
* tokens that were generated.
|
|
50
|
+
*/
|
|
51
51
|
logprobs: z.union([z.boolean(), z.number()]).optional(),
|
|
52
52
|
}),
|
|
53
53
|
),
|
|
@@ -11,15 +11,15 @@ export const openaiEmbeddingProviderOptions = lazySchema(() =>
|
|
|
11
11
|
zodSchema(
|
|
12
12
|
z.object({
|
|
13
13
|
/**
|
|
14
|
-
The number of dimensions the resulting output embeddings should have.
|
|
15
|
-
Only supported in text-embedding-3 and later models.
|
|
16
|
-
|
|
14
|
+
* The number of dimensions the resulting output embeddings should have.
|
|
15
|
+
* Only supported in text-embedding-3 and later models.
|
|
16
|
+
*/
|
|
17
17
|
dimensions: z.number().optional(),
|
|
18
18
|
|
|
19
19
|
/**
|
|
20
|
-
A unique identifier representing your end-user, which can help OpenAI to
|
|
21
|
-
monitor and detect abuse. Learn more.
|
|
22
|
-
*/
|
|
20
|
+
* A unique identifier representing your end-user, which can help OpenAI to
|
|
21
|
+
* monitor and detect abuse. Learn more.
|
|
22
|
+
*/
|
|
23
23
|
user: z.string().optional(),
|
|
24
24
|
}),
|
|
25
25
|
),
|
package/src/openai-provider.ts
CHANGED
|
@@ -34,32 +34,32 @@ export interface OpenAIProvider extends ProviderV3 {
|
|
|
34
34
|
(modelId: OpenAIResponsesModelId): LanguageModelV3;
|
|
35
35
|
|
|
36
36
|
/**
|
|
37
|
-
Creates an OpenAI model for text generation.
|
|
37
|
+
* Creates an OpenAI model for text generation.
|
|
38
38
|
*/
|
|
39
39
|
languageModel(modelId: OpenAIResponsesModelId): LanguageModelV3;
|
|
40
40
|
|
|
41
41
|
/**
|
|
42
|
-
Creates an OpenAI chat model for text generation.
|
|
42
|
+
* Creates an OpenAI chat model for text generation.
|
|
43
43
|
*/
|
|
44
44
|
chat(modelId: OpenAIChatModelId): LanguageModelV3;
|
|
45
45
|
|
|
46
46
|
/**
|
|
47
|
-
Creates an OpenAI responses API model for text generation.
|
|
47
|
+
* Creates an OpenAI responses API model for text generation.
|
|
48
48
|
*/
|
|
49
49
|
responses(modelId: OpenAIResponsesModelId): LanguageModelV3;
|
|
50
50
|
|
|
51
51
|
/**
|
|
52
|
-
Creates an OpenAI completion model for text generation.
|
|
52
|
+
* Creates an OpenAI completion model for text generation.
|
|
53
53
|
*/
|
|
54
54
|
completion(modelId: OpenAICompletionModelId): LanguageModelV3;
|
|
55
55
|
|
|
56
56
|
/**
|
|
57
|
-
Creates a model for text embeddings.
|
|
57
|
+
* Creates a model for text embeddings.
|
|
58
58
|
*/
|
|
59
59
|
embedding(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
|
|
60
60
|
|
|
61
61
|
/**
|
|
62
|
-
Creates a model for text embeddings.
|
|
62
|
+
* Creates a model for text embeddings.
|
|
63
63
|
*/
|
|
64
64
|
embeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
|
|
65
65
|
|
|
@@ -74,71 +74,71 @@ Creates a model for text embeddings.
|
|
|
74
74
|
textEmbeddingModel(modelId: OpenAIEmbeddingModelId): EmbeddingModelV3;
|
|
75
75
|
|
|
76
76
|
/**
|
|
77
|
-
Creates a model for image generation.
|
|
77
|
+
* Creates a model for image generation.
|
|
78
78
|
*/
|
|
79
79
|
image(modelId: OpenAIImageModelId): ImageModelV3;
|
|
80
80
|
|
|
81
81
|
/**
|
|
82
|
-
Creates a model for image generation.
|
|
82
|
+
* Creates a model for image generation.
|
|
83
83
|
*/
|
|
84
84
|
imageModel(modelId: OpenAIImageModelId): ImageModelV3;
|
|
85
85
|
|
|
86
86
|
/**
|
|
87
|
-
Creates a model for transcription.
|
|
87
|
+
* Creates a model for transcription.
|
|
88
88
|
*/
|
|
89
89
|
transcription(modelId: OpenAITranscriptionModelId): TranscriptionModelV3;
|
|
90
90
|
|
|
91
91
|
/**
|
|
92
|
-
Creates a model for speech generation.
|
|
92
|
+
* Creates a model for speech generation.
|
|
93
93
|
*/
|
|
94
94
|
speech(modelId: OpenAISpeechModelId): SpeechModelV3;
|
|
95
95
|
|
|
96
96
|
/**
|
|
97
|
-
OpenAI-specific tools.
|
|
97
|
+
* OpenAI-specific tools.
|
|
98
98
|
*/
|
|
99
99
|
tools: typeof openaiTools;
|
|
100
100
|
}
|
|
101
101
|
|
|
102
102
|
export interface OpenAIProviderSettings {
|
|
103
103
|
/**
|
|
104
|
-
Base URL for the OpenAI API calls.
|
|
105
|
-
|
|
104
|
+
* Base URL for the OpenAI API calls.
|
|
105
|
+
*/
|
|
106
106
|
baseURL?: string;
|
|
107
107
|
|
|
108
108
|
/**
|
|
109
|
-
API key for authenticating requests.
|
|
110
|
-
|
|
109
|
+
* API key for authenticating requests.
|
|
110
|
+
*/
|
|
111
111
|
apiKey?: string;
|
|
112
112
|
|
|
113
113
|
/**
|
|
114
|
-
OpenAI Organization.
|
|
115
|
-
|
|
114
|
+
* OpenAI Organization.
|
|
115
|
+
*/
|
|
116
116
|
organization?: string;
|
|
117
117
|
|
|
118
118
|
/**
|
|
119
|
-
OpenAI project.
|
|
120
|
-
|
|
119
|
+
* OpenAI project.
|
|
120
|
+
*/
|
|
121
121
|
project?: string;
|
|
122
122
|
|
|
123
123
|
/**
|
|
124
|
-
Custom headers to include in the requests.
|
|
125
|
-
|
|
124
|
+
* Custom headers to include in the requests.
|
|
125
|
+
*/
|
|
126
126
|
headers?: Record<string, string>;
|
|
127
127
|
|
|
128
128
|
/**
|
|
129
|
-
Provider name. Overrides the `openai` default name for 3rd party providers.
|
|
129
|
+
* Provider name. Overrides the `openai` default name for 3rd party providers.
|
|
130
130
|
*/
|
|
131
131
|
name?: string;
|
|
132
132
|
|
|
133
133
|
/**
|
|
134
|
-
Custom fetch implementation. You can use it as a middleware to intercept requests,
|
|
135
|
-
or to provide a custom fetch implementation for e.g. testing.
|
|
136
|
-
|
|
134
|
+
* Custom fetch implementation. You can use it as a middleware to intercept requests,
|
|
135
|
+
* or to provide a custom fetch implementation for e.g. testing.
|
|
136
|
+
*/
|
|
137
137
|
fetch?: FetchFunction;
|
|
138
138
|
}
|
|
139
139
|
|
|
140
140
|
/**
|
|
141
|
-
Create an OpenAI provider instance.
|
|
141
|
+
* Create an OpenAI provider instance.
|
|
142
142
|
*/
|
|
143
143
|
export function createOpenAI(
|
|
144
144
|
options: OpenAIProviderSettings = {},
|
|
@@ -265,6 +265,6 @@ export function createOpenAI(
|
|
|
265
265
|
}
|
|
266
266
|
|
|
267
267
|
/**
|
|
268
|
-
Default OpenAI provider instance.
|
|
268
|
+
* Default OpenAI provider instance.
|
|
269
269
|
*/
|
|
270
270
|
export const openai = createOpenAI();
|