@ai-sdk/openai 3.0.0-beta.57 → 3.0.0-beta.59
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +27 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -3
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -3
- package/dist/internal/index.d.ts +4 -3
- package/dist/internal/index.js +26 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +26 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 # @ai-sdk/openai
 
+## 3.0.0-beta.59
+
+### Patch Changes
+
+- d0f1baf: feat(openai): Add support for 'promptCacheRetention: 24h' for gpt5.1 series
+
+## 3.0.0-beta.58
+
+### Patch Changes
+
+- 79b4e46: feat(openai): add 'gpt-5.1' modelID
+
 ## 3.0.0-beta.57
 
 ### Patch Changes
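Taken together, the two patch releases above add the `gpt-5.1` model IDs and a `promptCacheRetention` provider option. As a rough, non-authoritative sketch of how a consumer might exercise both, assuming the AI SDK's `generateText` from the `ai` package and the provider's `openai.responses()` factory; the prompt and cache key are made-up values:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: opt a gpt-5.1 call into 24-hour extended prompt caching.
// promptCacheRetention ('in_memory' | '24h') is the option added in beta.59;
// promptCacheKey and the prompt are illustrative values.
const { text } = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Summarize the changes in this release.',
  providerOptions: {
    openai: {
      promptCacheKey: 'release-notes-summary', // hypothetical cache key
      promptCacheRetention: '24h', // extended retention, gpt-5.1 series only
    },
  },
});

console.log(text);
```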
package/dist/index.d.mts
CHANGED
@@ -3,13 +3,13 @@ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, Transcript
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
 
-type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | (string & {});
 declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
 logitBias?: Record<number, number> | undefined;
 logprobs?: number | boolean | undefined;
 parallelToolCalls?: boolean | undefined;
 user?: string | undefined;
-reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
 maxCompletionTokens?: number | undefined;
 store?: boolean | undefined;
 metadata?: Record<string, string> | undefined;
@@ -19,6 +19,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
 strictJsonSchema?: boolean | undefined;
 textVerbosity?: "low" | "medium" | "high" | undefined;
 promptCacheKey?: string | undefined;
+promptCacheRetention?: "in_memory" | "24h" | undefined;
 safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -379,7 +380,7 @@ declare const openaiTools: {
 }>;
 };
 
-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
 include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
 instructions?: string | null | undefined;
@@ -389,6 +390,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
 parallelToolCalls?: boolean | null | undefined;
 previousResponseId?: string | null | undefined;
 promptCacheKey?: string | null | undefined;
+promptCacheRetention?: "in_memory" | "24h" | null | undefined;
 reasoningEffort?: string | null | undefined;
 reasoningSummary?: string | null | undefined;
 safetyIdentifier?: string | null | undefined;
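The chat typings above widen `reasoningEffort` with a `'none'` value and add `promptCacheRetention` next to `promptCacheKey`. A minimal sketch of passing those options through the chat factory, assuming `streamText` from the `ai` package and `openai.chat()`; the prompt and option values are illustrative:

```ts
import { streamText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: turn reasoning effort off on a gpt-5.1 chat call.
// 'none' is the value added to the reasoningEffort union in this release.
const result = streamText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Rewrite this paragraph in plain English.', // illustrative
  providerOptions: {
    openai: {
      reasoningEffort: 'none',
      textVerbosity: 'low', // pre-existing option, shown for context
    },
  },
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```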
package/dist/index.d.ts
CHANGED
@@ -3,13 +3,13 @@ import { ProviderV3, LanguageModelV3, EmbeddingModelV3, ImageModelV3, Transcript
 import * as _ai_sdk_provider_utils from '@ai-sdk/provider-utils';
 import { InferSchema, FetchFunction } from '@ai-sdk/provider-utils';
 
-type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | (string & {});
+type OpenAIChatModelId = 'o1' | 'o1-2024-12-17' | 'o3-mini' | 'o3-mini-2025-01-31' | 'o3' | 'o3-2025-04-16' | 'gpt-4.1' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini' | 'gpt-4o-mini-2024-07-18' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4' | 'gpt-4-0613' | 'gpt-4.5-preview' | 'gpt-4.5-preview-2025-02-27' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-1106' | 'chatgpt-4o-latest' | 'gpt-5' | 'gpt-5-2025-08-07' | 'gpt-5-mini' | 'gpt-5-mini-2025-08-07' | 'gpt-5-nano' | 'gpt-5-nano-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | (string & {});
 declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<{
 logitBias?: Record<number, number> | undefined;
 logprobs?: number | boolean | undefined;
 parallelToolCalls?: boolean | undefined;
 user?: string | undefined;
-reasoningEffort?: "minimal" | "low" | "medium" | "high" | undefined;
+reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | undefined;
 maxCompletionTokens?: number | undefined;
 store?: boolean | undefined;
 metadata?: Record<string, string> | undefined;
@@ -19,6 +19,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
 strictJsonSchema?: boolean | undefined;
 textVerbosity?: "low" | "medium" | "high" | undefined;
 promptCacheKey?: string | undefined;
+promptCacheRetention?: "in_memory" | "24h" | undefined;
 safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -379,7 +380,7 @@ declare const openaiTools: {
 }>;
 };
 
-type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
+type OpenAIResponsesModelId = 'chatgpt-4o-latest' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo' | 'gpt-4-0613' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-turbo' | 'gpt-4.1-2025-04-14' | 'gpt-4.1-mini-2025-04-14' | 'gpt-4.1-mini' | 'gpt-4.1-nano-2025-04-14' | 'gpt-4.1-nano' | 'gpt-4.1' | 'gpt-4' | 'gpt-4o-2024-05-13' | 'gpt-4o-2024-08-06' | 'gpt-4o-2024-11-20' | 'gpt-4o-mini-2024-07-18' | 'gpt-4o-mini' | 'gpt-4o' | 'gpt-5.1' | 'gpt-5.1-chat-latest' | 'gpt-5.1-codex-mini' | 'gpt-5.1-codex' | 'gpt-5-2025-08-07' | 'gpt-5-chat-latest' | 'gpt-5-codex' | 'gpt-5-mini-2025-08-07' | 'gpt-5-mini' | 'gpt-5-nano-2025-08-07' | 'gpt-5-nano' | 'gpt-5-pro-2025-10-06' | 'gpt-5-pro' | 'gpt-5' | 'o1-2024-12-17' | 'o1' | 'o3-2025-04-16' | 'o3-mini-2025-01-31' | 'o3-mini' | 'o3' | (string & {});
 declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazySchema<{
 include?: ("file_search_call.results" | "message.output_text.logprobs" | "reasoning.encrypted_content")[] | null | undefined;
 instructions?: string | null | undefined;
@@ -389,6 +390,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
 parallelToolCalls?: boolean | null | undefined;
 previousResponseId?: string | null | undefined;
 promptCacheKey?: string | null | undefined;
+promptCacheRetention?: "in_memory" | "24h" | null | undefined;
 reasoningEffort?: string | null | undefined;
 reasoningSummary?: string | null | undefined;
 safetyIdentifier?: string | null | undefined;
package/dist/index.js
CHANGED
@@ -446,7 +446,7 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
 /**
  * Reasoning effort for reasoning models. Defaults to `medium`.
  */
-reasoningEffort: import_v43.z.enum(["minimal", "low", "medium", "high"]).optional(),
+reasoningEffort: import_v43.z.enum(["none", "minimal", "low", "medium", "high"]).optional(),
 /**
  * Maximum number of completion tokens to generate. Useful for reasoning models.
  */
@@ -496,6 +496,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
  * Useful for improving cache hit rates and working around automatic caching issues.
  */
 promptCacheKey: import_v43.z.string().optional(),
+/**
+ * The retention policy for the prompt cache.
+ * - 'in_memory': Default. Standard prompt caching behavior.
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+ * Currently only available for 5.1 series models.
+ *
+ * @default 'in_memory'
+ */
+promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
 /**
  * A stable identifier used to help detect users of your application
  * that may be violating OpenAI's usage policies. The IDs should be a
@@ -662,6 +671,7 @@ var OpenAIChatLanguageModel = class {
 reasoning_effort: openaiOptions.reasoningEffort,
 service_tier: openaiOptions.serviceTier,
 prompt_cache_key: openaiOptions.promptCacheKey,
+prompt_cache_retention: openaiOptions.promptCacheRetention,
 safety_identifier: openaiOptions.safetyIdentifier,
 // messages:
 messages
@@ -3102,7 +3112,11 @@ var openaiResponsesReasoningModelIds = [
 "gpt-5-nano",
 "gpt-5-nano-2025-08-07",
 "gpt-5-pro",
-"gpt-5-pro-2025-10-06"
+"gpt-5-pro-2025-10-06",
+"gpt-5.1",
+"gpt-5.1-chat-latest",
+"gpt-5.1-codex-mini",
+"gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
 "gpt-4.1",
@@ -3175,6 +3189,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchem
 parallelToolCalls: import_v418.z.boolean().nullish(),
 previousResponseId: import_v418.z.string().nullish(),
 promptCacheKey: import_v418.z.string().nullish(),
+/**
+ * The retention policy for the prompt cache.
+ * - 'in_memory': Default. Standard prompt caching behavior.
+ * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+ * Currently only available for 5.1 series models.
+ *
+ * @default 'in_memory'
+ */
+promptCacheRetention: import_v418.z.enum(["in_memory", "24h"]).nullish(),
 reasoningEffort: import_v418.z.string().nullish(),
 reasoningSummary: import_v418.z.string().nullish(),
 safetyIdentifier: import_v418.z.string().nullish(),
@@ -3489,6 +3512,7 @@ var OpenAIResponsesLanguageModel = class {
 service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
 include,
 prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
 safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
 top_logprobs: topLogprobs,
 truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -4937,7 +4961,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.57" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.59" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
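In the compiled runtime above, the new gpt-5.1 IDs (including the codex variants) are registered among the Responses reasoning models, and `promptCacheRetention` is forwarded to the request body as `prompt_cache_retention`. A hedged sketch of a Responses-API call that would exercise that path; the model choice, prompt, cache key, and `reasoningSummary` value are all illustrative:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: gpt-5.1-codex-mini is one of the IDs added to openaiResponsesReasoningModelIds
// above; promptCacheRetention is mapped to prompt_cache_retention in the request body.
const { text } = await generateText({
  model: openai.responses('gpt-5.1-codex-mini'),
  prompt: 'Suggest a refactor for a function that copies an array twice.',
  providerOptions: {
    openai: {
      promptCacheKey: 'refactor-helper', // hypothetical key
      promptCacheRetention: '24h',       // extended retention, gpt-5.1 series only
      reasoningSummary: 'auto',          // illustrative value for the string option above
    },
  },
});

console.log(text);
```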