@ai-sdk/openai 2.0.65 → 2.0.67
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +27 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -3
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -3
- package/dist/internal/index.d.ts +4 -3
- package/dist/internal/index.js +26 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +26 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs CHANGED
@@ -428,7 +428,7 @@ var openaiChatLanguageModelOptions = lazyValidator2(
       /**
        * Reasoning effort for reasoning models. Defaults to `medium`.
        */
-      reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+      reasoningEffort: z3.enum(["none", "minimal", "low", "medium", "high"]).optional(),
       /**
        * Maximum number of completion tokens to generate. Useful for reasoning models.
        */
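The chat options schema now accepts `'none'` as a reasoning effort. A minimal sketch of how a consumer might pass it through `providerOptions` (the model ID and prompt are illustrative; `openai.chat` is this package's Chat Completions factory):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: 'none' is now a valid reasoningEffort value alongside
// 'minimal', 'low', 'medium', and 'high' for chat models.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'), // illustrative model ID
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: { reasoningEffort: 'none' },
  },
});
```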
@@ -478,6 +478,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
        * Useful for improving cache hit rates and working around automatic caching issues.
        */
       promptCacheKey: z3.string().optional(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
       /**
        * A stable identifier used to help detect users of your application
        * that may be violating OpenAI's usage policies. The IDs should be a
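A sketch of setting the new `promptCacheRetention` option alongside `promptCacheKey`. The two allowed values come straight from the schema above; the cache key is an arbitrary, application-chosen string (hypothetical here):

```ts
import { openai } from '@ai-sdk/openai';
import { generateText } from 'ai';

// Sketch: opt a keyed prompt prefix into 24-hour cache retention.
// 'in_memory' is the default; '24h' is documented as 5.1-series only.
const result = await generateText({
  model: openai.chat('gpt-5.1'), // illustrative model ID
  prompt: 'Answer using the shared system prefix.',
  providerOptions: {
    openai: {
      promptCacheKey: 'my-app-shared-prefix', // hypothetical key
      promptCacheRetention: '24h',
    },
  },
});
```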
@@ -646,6 +655,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
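On the wire, the camelCase provider option is forwarded as the snake_case `prompt_cache_retention` field. A sketch of the relevant slice of the resulting Chat Completions request body (field names from the mapping above; the values are illustrative):

```ts
// Illustrative request-body fragment produced by the mapping above.
const bodyFragment = {
  reasoning_effort: 'none',
  prompt_cache_key: 'my-app-shared-prefix', // hypothetical key
  prompt_cache_retention: '24h',
};
```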
@@ -2914,7 +2924,11 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5-nano",
   "gpt-5-nano-2025-08-07",
   "gpt-5-pro",
-  "gpt-5-pro-2025-10-06"
+  "gpt-5-pro-2025-10-06",
+  "gpt-5.1",
+  "gpt-5.1-chat-latest",
+  "gpt-5.1-codex-mini",
+  "gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
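With these IDs added to `openaiResponsesReasoningModelIds`, the provider now treats the 5.1 series as reasoning models on the Responses API path. A sketch of instantiating one (`openai.responses` is this package's explicit Responses API factory):

```ts
import { openai } from '@ai-sdk/openai';

// Any of the four newly recognized IDs works here:
// 'gpt-5.1', 'gpt-5.1-chat-latest', 'gpt-5.1-codex-mini', 'gpt-5.1-codex'.
const model = openai.responses('gpt-5.1');
```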
@@ -2987,6 +3001,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator9(
       parallelToolCalls: z17.boolean().nullish(),
       previousResponseId: z17.string().nullish(),
       promptCacheKey: z17.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z17.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z17.string().nullish(),
       reasoningSummary: z17.string().nullish(),
       safetyIdentifier: z17.string().nullish(),
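The same retention option lands in the Responses provider options (declared `nullish` here rather than `optional`). A sketch under the same assumptions as above, combining it with a 5.1 model on the Responses path:

```ts
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';

// Sketch: 24-hour prompt cache retention on the Responses API path.
const result = streamText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Continue the conversation.',
  providerOptions: {
    openai: {
      promptCacheKey: 'my-app-shared-prefix', // hypothetical key
      promptCacheRetention: '24h',
    },
  },
});
```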
@@ -3272,6 +3295,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -4551,7 +4575,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.65" : "0.0.0-test";
+var VERSION = true ? "2.0.67" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {