@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.59
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +21 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +21 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +20 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +20 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
```diff
@@ -485,6 +485,15 @@ var openaiChatLanguageModelOptions = lazySchema2(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: z3.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
@@ -653,6 +662,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
```
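These two hunks wire the new feature through the Chat Completions path: the provider options schema gains a `promptCacheRetention` enum, and the request builder forwards it to OpenAI as `prompt_cache_retention`. A minimal usage sketch — assuming the AI SDK's `generateText` call and an illustrative model id, neither of which appears in this diff:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Illustrative only: opts this request into extended 24-hour prompt caching.
// The model id "gpt-5.1" is an assumption based on the "5.1 series" note in the schema docs.
const { text } = await generateText({
  model: openai("gpt-5.1"),
  prompt: "Summarize the latest release notes.",
  providerOptions: {
    openai: {
      promptCacheKey: "release-notes-summary", // stable key to improve cache hit rates
      promptCacheRetention: "24h", // default is 'in_memory'
    },
  },
});
```

Since the option is `.optional()`, omitting it keeps the previous behavior (`'in_memory'`).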
```diff
@@ -3234,6 +3244,15 @@ var openaiResponsesProviderOptionsSchema = lazySchema16(
     parallelToolCalls: z18.boolean().nullish(),
     previousResponseId: z18.string().nullish(),
     promptCacheKey: z18.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: z18.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: z18.string().nullish(),
     reasoningSummary: z18.string().nullish(),
     safetyIdentifier: z18.string().nullish(),
@@ -3550,6 +3569,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
```
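The Responses API path receives the same option; the only difference is that this schema uses `.nullish()`, so `null` is accepted alongside `undefined`. A sketch of the equivalent call against the Responses model, under the same assumptions as the example above:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Same option on the Responses API path; forwarded as prompt_cache_retention.
const { text } = await generateText({
  model: openai.responses("gpt-5.1"), // illustrative model id
  prompt: "Draft a changelog entry for prompt cache retention.",
  providerOptions: {
    openai: { promptCacheRetention: "24h" },
  },
});
```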
The remaining hunk is the routine version bump for this release:

```diff
@@ -5010,7 +5030,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.58" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.59" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {
```