@ai-sdk/openai 2.0.66 → 2.0.67
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +21 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +21 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +20 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +20 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs CHANGED
@@ -470,6 +470,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: z3.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
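This release adds a `promptCacheRetention` provider option, validated as `'in_memory' | '24h'`, to both the Chat Completions and Responses models. A minimal usage sketch for the chat model, assuming the AI SDK's `generateText` and this provider's `openai.chat(...)` factory (the model id and cache key are illustrative):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Summarize the shared system context.',
  providerOptions: {
    openai: {
      // Stable key to improve cache hit rates (pre-existing option).
      promptCacheKey: 'my-app/system-prompt-v1',
      // New in 2.0.67: keep cached prefixes active for up to 24 hours.
      promptCacheRetention: '24h',
    },
  },
});
```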
@@ -638,6 +647,7 @@ var OpenAIChatLanguageModel = class {
         reasoning_effort: openaiOptions.reasoningEffort,
         service_tier: openaiOptions.serviceTier,
         prompt_cache_key: openaiOptions.promptCacheKey,
+        prompt_cache_retention: openaiOptions.promptCacheRetention,
         safety_identifier: openaiOptions.safetyIdentifier,
         // messages:
         messages
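In the chat request body the camelCase option is mapped to the snake_case `prompt_cache_retention` field; when unset it stays `undefined` and is dropped during JSON serialization. An illustrative excerpt of the resulting payload:

```ts
// Illustrative excerpt of the Chat Completions request body produced by
// the mapping above; camelCase provider options become snake_case fields.
const body = {
  model: 'gpt-5.1',
  prompt_cache_key: 'my-app/system-prompt-v1',
  prompt_cache_retention: '24h',
  messages: [{ role: 'user', content: 'Hello' }],
};
```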
@@ -3029,6 +3039,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator12(
     parallelToolCalls: z15.boolean().nullish(),
     previousResponseId: z15.string().nullish(),
     promptCacheKey: z15.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z15.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: z15.string().nullish(),
     reasoningSummary: z15.string().nullish(),
     safetyIdentifier: z15.string().nullish(),
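The Responses provider options schema gains the same option, declared `.nullish()` to match the neighboring fields. Usage mirrors the chat model; a sketch assuming this provider's `openai.responses(...)` factory:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Answer using the cached instructions.',
  providerOptions: {
    openai: { promptCacheRetention: '24h' },
  },
});
```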
@@ -3568,6 +3587,7 @@ var OpenAIResponsesLanguageModel = class {
         service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
         include,
         prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+        prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
         safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
         top_logprobs: topLogprobs,
         truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
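Finally, the Responses request builder forwards the option into the API payload. A hedged sketch of the eventual HTTP request, assuming OpenAI's public `/v1/responses` endpoint and its `input` field (values illustrative):

```ts
// Sketch of the request the provider ultimately issues; undefined
// options are omitted from the serialized payload.
const res = await fetch('https://api.openai.com/v1/responses', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    model: 'gpt-5.1',
    input: 'Answer using the cached instructions.',
    prompt_cache_key: 'my-app/system-prompt-v1',
    prompt_cache_retention: '24h',
  }),
});
```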