@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.59
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +21 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +21 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +20 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +20 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs (CHANGED)
@@ -477,6 +477,15 @@ var openaiChatLanguageModelOptions = lazySchema2(
        * Useful for improving cache hit rates and working around automatic caching issues.
        */
       promptCacheKey: z3.string().optional(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
       /**
        * A stable identifier used to help detect users of your application
        * that may be violating OpenAI's usage policies. The IDs should be a
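The new field is a plain zod enum, so invalid retention values are rejected when provider options are parsed. Below is a minimal standalone sketch of the same validation (illustrative only; the real field lives inside the `lazySchema2`-wrapped `openaiChatLanguageModelOptions` above). The next hunk then forwards the parsed option into the outgoing request body.

```ts
import { z } from 'zod';

// Illustrative reconstruction of the new schema field; the names are
// copied from the diff, but this standalone object is not the package's API.
const cacheOptions = z.object({
  promptCacheKey: z.string().optional(),
  promptCacheRetention: z.enum(['in_memory', '24h']).optional(),
});

cacheOptions.parse({ promptCacheRetention: '24h' }); // ok
cacheOptions.parse({});                              // ok: the field is optional
// cacheOptions.parse({ promptCacheRetention: '7d' }); // would throw a ZodError
```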
@@ -645,6 +654,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
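With the schema and the request mapping in place, callers can opt in per request. A hedged usage sketch, assuming the `generateText`/`providerOptions` pattern from the `ai` package; the model id, cache key, and prompt are placeholders:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Opt into 24-hour prompt cache retention for a long, stable prefix.
// 'gpt-5.1' is a placeholder model id; per the schema docs above,
// '24h' is currently only available for 5.1 series models.
const { text } = await generateText({
  model: openai('gpt-5.1'),
  providerOptions: {
    openai: {
      promptCacheKey: 'my-app/support-bot/v1', // stable key, illustrative value
      promptCacheRetention: '24h',             // new in this release; defaults to 'in_memory'
    },
  },
  prompt: 'Summarize the open tickets.',
});
```

Both options are serialized into the Chat Completions request body as `prompt_cache_key` and `prompt_cache_retention`, as the hunk above shows.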
@@ -3153,6 +3163,15 @@ var openaiResponsesProviderOptionsSchema = lazySchema13(
       parallelToolCalls: z15.boolean().nullish(),
       previousResponseId: z15.string().nullish(),
       promptCacheKey: z15.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z15.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z15.string().nullish(),
       reasoningSummary: z15.string().nullish(),
       safetyIdentifier: z15.string().nullish(),
@@ -3814,6 +3833,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
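The Responses API path mirrors the chat path, except the schema uses `.nullish()` rather than `.optional()`, so an explicit `null` is also accepted and the field is simply omitted from the request. A sketch under the same assumptions, using the provider's `openai.responses(...)` model factory:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Same option on the Responses API path; the model id is a placeholder.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  providerOptions: {
    openai: { promptCacheRetention: 'in_memory' }, // the explicit default
  },
  prompt: 'Draft a release note for this change.',
});
```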