@ai-sdk/openai 2.0.66 → 2.0.68
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +30 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +29 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.mjs
CHANGED
@@ -470,6 +470,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
        * Useful for improving cache hit rates and working around automatic caching issues.
        */
       promptCacheKey: z3.string().optional(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
       /**
        * A stable identifier used to help detect users of your application
        * that may be violating OpenAI's usage policies. The IDs should be a
@@ -638,6 +647,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
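For orientation (not part of the published diff): in application code, these provider-specific options are passed through providerOptions.openai, and the provider maps the camelCase keys to the snake_case request fields shown above (promptCacheRetention → prompt_cache_retention). A minimal sketch; the model id and cache key are illustrative placeholders, not values from the diff:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: opt in to extended 24-hour prompt caching on a chat model.
const { text } = await generateText({
  model: openai('gpt-5.1'), // placeholder id; '24h' requires a 5.1 series model per the doc comment above
  prompt: 'Summarize the cached context.',
  providerOptions: {
    openai: {
      promptCacheKey: 'my-app-shared-prefix-v1', // placeholder cache key
      promptCacheRetention: '24h',
    },
  },
});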
@@ -2997,6 +3007,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazyValidator12(
   () => zodSchema13(
     z15.object({
+      conversation: z15.string().nullish(),
       include: z15.array(
         z15.enum([
           "reasoning.encrypted_content",
@@ -3029,6 +3040,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator12(
       parallelToolCalls: z15.boolean().nullish(),
       previousResponseId: z15.string().nullish(),
       promptCacheKey: z15.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z15.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z15.string().nullish(),
       reasoningSummary: z15.string().nullish(),
       safetyIdentifier: z15.string().nullish(),
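The two Responses-only additions (conversation and promptCacheRetention) are used the same way, via the Responses model. Again a hedged sketch: the conversation id is a placeholder, and per the warning added in the next hunk, conversation should not be combined with previousResponseId:

import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: continue a server-side conversation on the Responses API
// with 24-hour prompt cache retention. 'conv_123' is a placeholder id.
const { text } = await generateText({
  model: openai.responses('gpt-5.1'), // placeholder model id
  prompt: 'Continue where we left off.',
  providerOptions: {
    openai: {
      conversation: 'conv_123',
      promptCacheRetention: '24h',
    },
  },
});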
@@ -3496,6 +3516,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
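Note on the guard above: it only records an 'unsupported-setting' warning; nothing is thrown and the conflicting values are not dropped (the next hunk shows conversation still being forwarded in the request body). Callers should treat the warning, surfaced on the call result, as a signal to remove one of the two options rather than expect the provider to resolve the conflict.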
@@ -3558,6 +3585,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3568,6 +3596,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,