@ai-sdk/openai 3.0.0-beta.57 → 3.0.0-beta.59
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +27 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -3
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -3
- package/dist/internal/index.d.ts +4 -3
- package/dist/internal/index.js +26 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +26 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs (CHANGED)
@@ -427,7 +427,7 @@ var openaiChatLanguageModelOptions = lazySchema2(
     /**
      * Reasoning effort for reasoning models. Defaults to `medium`.
      */
-    reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+    reasoningEffort: z3.enum(["none", "minimal", "low", "medium", "high"]).optional(),
     /**
      * Maximum number of completion tokens to generate. Useful for reasoning models.
      */
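In practice, the chat options schema now accepts `'none'` as a `reasoningEffort` value. A minimal sketch of passing it through provider options, assuming the usual `generateText` entry point from the `ai` package (the model id and prompt are illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// 'none' was rejected by the provider options enum before this change.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Summarize this changelog entry in one sentence.',
  providerOptions: {
    openai: { reasoningEffort: 'none' },
  },
});
```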
@@ -477,6 +477,15 @@ var openaiChatLanguageModelOptions = lazySchema2(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: z3.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
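`promptCacheRetention` lands next to `promptCacheKey` in the chat options schema. A sketch of opting into the extended 24-hour cache; the cache key is an arbitrary application-chosen string, and per the doc comment above, `'24h'` is currently limited to 5.1 series models:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const result = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Answer from the cached system context.',
  providerOptions: {
    openai: {
      promptCacheKey: 'my-app/system-prompt-v2', // illustrative key
      promptCacheRetention: '24h', // or 'in_memory' (the default)
    },
  },
});
```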
@@ -645,6 +654,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
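The camelCase option is forwarded to the Chat Completions request body as snake_case `prompt_cache_retention`, next to the existing cache fields. Roughly, with illustrative values and unrelated fields elided:

```ts
// Approximate request body produced after this change (a sketch, not
// the provider's exact serialization).
const body = {
  model: 'gpt-5.1',
  reasoning_effort: 'none',
  prompt_cache_key: 'my-app/system-prompt-v2',
  prompt_cache_retention: '24h', // new field
  messages: [{ role: 'user', content: 'Hello' }],
};
```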
@@ -3076,7 +3086,11 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5-nano",
   "gpt-5-nano-2025-08-07",
   "gpt-5-pro",
-  "gpt-5-pro-2025-10-06"
+  "gpt-5-pro-2025-10-06",
+  "gpt-5.1",
+  "gpt-5.1-chat-latest",
+  "gpt-5.1-codex-mini",
+  "gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
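The four gpt-5.1 ids are appended to `openaiResponsesReasoningModelIds`, so the Responses provider treats them as reasoning models. Selecting one works like any other Responses model; a sketch assuming the `streamText` helper from the `ai` package:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Any of the newly listed ids applies here: 'gpt-5.1',
// 'gpt-5.1-chat-latest', 'gpt-5.1-codex-mini', 'gpt-5.1-codex'.
const result = streamText({
  model: openai.responses('gpt-5.1-codex'),
  prompt: 'Write a binary search in TypeScript.',
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```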
@@ -3149,6 +3163,15 @@ var openaiResponsesProviderOptionsSchema = lazySchema13(
     parallelToolCalls: z15.boolean().nullish(),
     previousResponseId: z15.string().nullish(),
     promptCacheKey: z15.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z15.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: z15.string().nullish(),
     reasoningSummary: z15.string().nullish(),
     safetyIdentifier: z15.string().nullish(),
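The Responses options schema mirrors the chat one, except the new field is `.nullish()` like its neighbors, and the final hunk below forwards it as `prompt_cache_retention` exactly as the chat model does. A sketch combining the new model ids with the new option (model id and values illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

const { text } = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Classify this support ticket.',
  providerOptions: {
    openai: {
      promptCacheKey: 'ticket-classifier-v1', // illustrative
      promptCacheRetention: '24h',
    },
  },
});
```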
@@ -3810,6 +3833,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,