@ai-sdk/openai 2.0.65 → 2.0.67
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +27 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -3
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -3
- package/dist/internal/index.d.ts +4 -3
- package/dist/internal/index.js +26 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +26 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
@@ -420,7 +420,7 @@ var openaiChatLanguageModelOptions = lazyValidator2(
     /**
      * Reasoning effort for reasoning models. Defaults to `medium`.
      */
-    reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+    reasoningEffort: z3.enum(["none", "minimal", "low", "medium", "high"]).optional(),
     /**
      * Maximum number of completion tokens to generate. Useful for reasoning models.
      */
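The chat options schema now accepts `reasoningEffort: 'none'` alongside the existing four levels. A minimal sketch of opting into it via the AI SDK's `providerOptions` (the model id and prompt are placeholder assumptions; whether a given model honors `'none'` is decided by the OpenAI API, not the schema):

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// As of 2.0.67, "none" passes chat options validation in addition to
// "minimal" | "low" | "medium" | "high".
const { text } = await generateText({
  model: openai.chat("gpt-5.1"), // illustrative model id
  providerOptions: {
    openai: { reasoningEffort: "none" },
  },
  prompt: "Summarize this changelog entry in one sentence.",
});
```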
@@ -470,6 +470,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: z3.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
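The new `promptCacheRetention` option pairs with the existing `promptCacheKey`. A sketch of requesting the extended 24-hour retention on a Chat Completions call; the cache key and model id are made-up examples:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai.chat("gpt-5.1"),
  providerOptions: {
    openai: {
      promptCacheKey: "support-bot-system-prompt-v3", // hypothetical cache key
      promptCacheRetention: "24h", // "in_memory" is the default
    },
  },
  prompt: "How do I reset my password?",
});
```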
@@ -638,6 +647,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
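The camelCase provider option is forwarded to the Chat Completions request body under the snake_case name shown above. Assuming the options from the previous sketch, the relevant slice of the outgoing payload would look roughly like this:

```ts
// Hypothetical slice of the POST /v1/chat/completions body produced
// by the mapping above; field names are taken from this diff.
const body = {
  model: "gpt-5.1",
  reasoning_effort: "none",
  prompt_cache_key: "support-bot-system-prompt-v3",
  prompt_cache_retention: "24h",
  // messages, tools, etc. follow
};
```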
@@ -2952,7 +2962,11 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5-nano",
   "gpt-5-nano-2025-08-07",
   "gpt-5-pro",
-  "gpt-5-pro-2025-10-06"
+  "gpt-5-pro-2025-10-06",
+  "gpt-5.1",
+  "gpt-5.1-chat-latest",
+  "gpt-5.1-codex-mini",
+  "gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
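Membership in `openaiResponsesReasoningModelIds` is what opts a model id into the provider's reasoning-model handling and its typed id union. A sketch of constructing the four newly listed ids, assuming `openai.responses` as the entry point:

```ts
import { openai } from "@ai-sdk/openai";

// The 5.1 series ids added in this release, now recognized as
// reasoning models by the Responses implementation:
const gpt51 = openai.responses("gpt-5.1");
const gpt51Chat = openai.responses("gpt-5.1-chat-latest");
const gpt51CodexMini = openai.responses("gpt-5.1-codex-mini");
const gpt51Codex = openai.responses("gpt-5.1-codex");
```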
@@ -3025,6 +3039,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator12(
     parallelToolCalls: z15.boolean().nullish(),
     previousResponseId: z15.string().nullish(),
     promptCacheKey: z15.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z15.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: z15.string().nullish(),
     reasoningSummary: z15.string().nullish(),
     safetyIdentifier: z15.string().nullish(),
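The same retention option lands in the Responses provider options, declared `nullish` rather than `optional` to match the rest of this schema. A sketch with `streamText`; the cache key is again a made-up example:

```ts
import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";

const result = streamText({
  model: openai.responses("gpt-5.1"),
  providerOptions: {
    openai: {
      promptCacheKey: "docs-qa-prefix-v1", // hypothetical cache key
      promptCacheRetention: "24h", // keep the cached prefix warm for up to 24 hours
    },
  },
  prompt: "What changed between 2.0.65 and 2.0.67?",
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```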
@@ -3564,6 +3587,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
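As on the chat side, the option is serialized as `prompt_cache_retention` on the Responses request. The `openaiOptions == null ? void 0 : ...` guards are simply the bundler's down-leveled form of optional chaining; a source-level equivalent, with an illustrative type standing in for the real options type:

```ts
// Down-leveled form in the bundle:
//   openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention
// Source-level equivalent:
type ResponsesOptions = { promptCacheRetention?: "in_memory" | "24h" };

function cacheRetention(openaiOptions?: ResponsesOptions) {
  return openaiOptions?.promptCacheRetention;
}
```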