@ai-sdk/openai 2.0.66 → 2.0.67
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +21 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +21 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +20 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +20 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
|
@@ -478,6 +478,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
|
|
|
478
478
|
* Useful for improving cache hit rates and working around automatic caching issues.
|
|
479
479
|
*/
|
|
480
480
|
promptCacheKey: z3.string().optional(),
|
|
481
|
+
/**
|
|
482
|
+
* The retention policy for the prompt cache.
|
|
483
|
+
* - 'in_memory': Default. Standard prompt caching behavior.
|
|
484
|
+
* - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
|
|
485
|
+
* Currently only available for 5.1 series models.
|
|
486
|
+
*
|
|
487
|
+
* @default 'in_memory'
|
|
488
|
+
*/
|
|
489
|
+
promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
|
|
481
490
|
/**
|
|
482
491
|
* A stable identifier used to help detect users of your application
|
|
483
492
|
* that may be violating OpenAI's usage policies. The IDs should be a
|
|
@@ -646,6 +655,7 @@ var OpenAIChatLanguageModel = class {
|
|
|
646
655
|
reasoning_effort: openaiOptions.reasoningEffort,
|
|
647
656
|
service_tier: openaiOptions.serviceTier,
|
|
648
657
|
prompt_cache_key: openaiOptions.promptCacheKey,
|
|
658
|
+
prompt_cache_retention: openaiOptions.promptCacheRetention,
|
|
649
659
|
safety_identifier: openaiOptions.safetyIdentifier,
|
|
650
660
|
// messages:
|
|
651
661
|
messages
|
|
@@ -2991,6 +3001,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator9(
|
|
|
2991
3001
|
parallelToolCalls: z17.boolean().nullish(),
|
|
2992
3002
|
previousResponseId: z17.string().nullish(),
|
|
2993
3003
|
promptCacheKey: z17.string().nullish(),
|
|
3004
|
+
/**
|
|
3005
|
+
* The retention policy for the prompt cache.
|
|
3006
|
+
* - 'in_memory': Default. Standard prompt caching behavior.
|
|
3007
|
+
* - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
|
|
3008
|
+
* Currently only available for 5.1 series models.
|
|
3009
|
+
*
|
|
3010
|
+
* @default 'in_memory'
|
|
3011
|
+
*/
|
|
3012
|
+
promptCacheRetention: z17.enum(["in_memory", "24h"]).nullish(),
|
|
2994
3013
|
reasoningEffort: z17.string().nullish(),
|
|
2995
3014
|
reasoningSummary: z17.string().nullish(),
|
|
2996
3015
|
safetyIdentifier: z17.string().nullish(),
|
|
@@ -3276,6 +3295,7 @@ var OpenAIResponsesLanguageModel = class {
|
|
|
3276
3295
|
service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
|
|
3277
3296
|
include,
|
|
3278
3297
|
prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
|
|
3298
|
+
prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
|
|
3279
3299
|
safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
|
|
3280
3300
|
top_logprobs: topLogprobs,
|
|
3281
3301
|
truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
|
|
@@ -4555,7 +4575,7 @@ var OpenAITranscriptionModel = class {
|
|
|
4555
4575
|
};
|
|
4556
4576
|
|
|
4557
4577
|
// src/version.ts
|
|
4558
|
-
var VERSION = true ? "2.0.66" : "0.0.0-test";
|
|
4578
|
+
var VERSION = true ? "2.0.67" : "0.0.0-test";
|
|
4559
4579
|
|
|
4560
4580
|
// src/openai-provider.ts
|
|
4561
4581
|
function createOpenAI(options = {}) {
|