@ai-sdk/openai 2.0.66 → 2.0.68
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +30 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +29 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.d.mts
CHANGED

@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;

package/dist/internal/index.d.ts
CHANGED

@@ -18,6 +18,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazyValidat
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferValidator<typeof openaiChatLanguageModelOptions>;

package/dist/internal/index.js
CHANGED

@@ -502,6 +502,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazyValidator)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     * Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a

@@ -668,6 +677,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
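
Taken together, the two chat hunks add a `promptCacheRetention` provider option and forward it to the API as `prompt_cache_retention`. A minimal usage sketch, assuming AI SDK v5's `generateText` and `providerOptions` plumbing (the model id, prompt, and cache key below are illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Opt a Chat Completions call into extended prompt caching.
// Per the JSDoc above, '24h' retention is currently limited to 5.1 series
// models, so 'gpt-5.1' is used here as an illustrative model id.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Summarize the changes in this release.',
  providerOptions: {
    openai: {
      promptCacheKey: 'release-notes-bot', // stable key to improve cache hit rates
      promptCacheRetention: '24h',         // serialized as prompt_cache_retention
    },
  },
});
```

Since the schema field is `.optional()` with a documented default of `'in_memory'`, omitting the option preserves the previous behavior, so existing callers are unaffected.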

@@ -2955,6 +2965,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValidator)(
   () => (0, import_provider_utils22.zodSchema)(
     import_v415.z.object({
+      conversation: import_v415.z.string().nullish(),
       include: import_v415.z.array(
         import_v415.z.enum([
           "reasoning.encrypted_content",

@@ -2987,6 +2998,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils22.lazyValid
       parallelToolCalls: import_v415.z.boolean().nullish(),
       previousResponseId: import_v415.z.string().nullish(),
       promptCacheKey: import_v415.z.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: import_v415.z.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: import_v415.z.string().nullish(),
       reasoningSummary: import_v415.z.string().nullish(),
       safetyIdentifier: import_v415.z.string().nullish(),
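
The Responses schema gains the same `promptCacheRetention` option plus a new `conversation` option; both are `.nullish()`, so they remain opt-in. A sketch of passing both, again assuming AI SDK v5 (`conv_abc123` is a placeholder for a server-side conversation id, and the model id is illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Attach the call to an existing server-side conversation and keep its
// cached prefix warm for up to 24 hours.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Pick up where we left off.',
  providerOptions: {
    openai: {
      conversation: 'conv_abc123', // placeholder conversation id
      promptCacheRetention: '24h',
    },
  },
});
```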

@@ -3432,6 +3452,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
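
Note that this guard only pushes an `unsupported-setting` warning rather than throwing, and the hunk does not show either value being dropped from the request. A sketch of what the conflict looks like from the caller's side, assuming call warnings surface on the AI SDK v5 result object as `result.warnings`:

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Supplying both options triggers the warning added in this hunk;
// the request is still sent, so treat the warning as a misconfiguration signal.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Hello',
  providerOptions: {
    openai: {
      conversation: 'conv_abc123',       // placeholder id
      previousResponseId: 'resp_xyz789', // placeholder id; conflicts with conversation
    },
  },
});

console.log(result.warnings);
// Expected to include:
// { type: 'unsupported-setting', setting: 'conversation',
//   details: 'conversation and previousResponseId cannot be used together' }
```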

@@ -3494,6 +3521,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,

@@ -3504,6 +3532,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,