@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.60
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +30 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +29 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/internal/index.d.mts
CHANGED

@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;

package/dist/internal/index.d.ts
CHANGED
@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;

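At the type level, the declaration change means the new option appears on the inferred chat options type. A minimal sketch, assuming the package's `internal` entry point exports the type shown in the hunks above:

```ts
// Sketch only: the import path assumes @ai-sdk/openai exposes an
// 'internal' export matching the dist/internal declaration files in
// this diff; treat the path as an assumption.
import type { OpenAIChatLanguageModelOptions } from '@ai-sdk/openai/internal';

const options: OpenAIChatLanguageModelOptions = {
  promptCacheKey: 'my-app-cache-key', // hypothetical cache key
  promptCacheRetention: '24h',        // newly added: 'in_memory' | '24h'
};
```
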
package/dist/internal/index.js
CHANGED
@@ -515,6 +515,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
@@ -681,6 +690,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages

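For consumers of the provider, the new chat option is passed through `providerOptions.openai`. A minimal sketch, assuming the AI SDK's `generateText` and this package's `openai.chat` factory; the model id and prompt are placeholders:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: opt a Chat Completions call into extended (24h) prompt caching.
// 'gpt-5.1' stands in for any 5.1-series model id; per the schema docs
// above, '24h' retention is currently limited to 5.1 series models.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  providerOptions: {
    openai: {
      promptCacheKey: 'my-app-cache-key', // hypothetical cache key
      promptCacheRetention: '24h',        // new in this release; defaults to 'in_memory'
    },
  },
  prompt: 'Summarize the release notes.',
});
```
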
@@ -3105,6 +3115,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazySchema)(
   () => (0, import_provider_utils22.zodSchema)(
     import_v415.z.object({
+      conversation: import_v415.z.string().nullish(),
       include: import_v415.z.array(
         import_v415.z.enum([
           "reasoning.encrypted_content",
@@ -3137,6 +3148,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazySchem
       parallelToolCalls: import_v415.z.boolean().nullish(),
       previousResponseId: import_v415.z.string().nullish(),
       promptCacheKey: import_v415.z.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: import_v415.z.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: import_v415.z.string().nullish(),
       reasoningSummary: import_v415.z.string().nullish(),
       safetyIdentifier: import_v415.z.string().nullish(),
@@ -3700,6 +3720,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,

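The guard above records a non-fatal warning when both continuation mechanisms are supplied. A sketch of how that surfaces to callers, assuming `generateText` exposes call warnings on its result; the ids and prompt are hypothetical:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: supplying both 'conversation' and 'previousResponseId' is
// flagged rather than thrown; per the diff, the call proceeds and the
// provider pushes an 'unsupported-setting' warning for 'conversation'.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  providerOptions: {
    openai: {
      conversation: 'conv_123',       // hypothetical conversation id
      previousResponseId: 'resp_456', // hypothetical response id
    },
  },
  prompt: 'Continue where we left off.',
});

console.log(result.warnings);
// expected to include:
// { type: 'unsupported-setting', setting: 'conversation', details: '...' }
```
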
@@ -3762,6 +3789,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3772,6 +3800,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,

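Taken together, the Responses API path gains the same cache-retention knob plus a `conversation` passthrough mapped onto the request body. A sketch of the intended combined use; the conversation id is a placeholder for one created server-side (e.g. via OpenAI's conversations endpoint, an assumption here), and the model id stands in for any 5.1-series model:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: continue a server-side conversation on the Responses API and
// keep its cached prefix warm for up to 24 hours via the new
// prompt_cache_retention request field.
const { text } = await generateText({
  model: openai.responses('gpt-5.1'),
  providerOptions: {
    openai: {
      conversation: 'conv_abc123', // hypothetical conversation id
      promptCacheRetention: '24h',
    },
  },
  prompt: 'What did we decide about the rollout?',
});
```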