@ai-sdk/openai 3.0.0-beta.57 → 3.0.0-beta.59
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +5 -3
- package/dist/index.d.ts +5 -3
- package/dist/index.js +27 -3
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +27 -3
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +4 -3
- package/dist/internal/index.d.ts +4 -3
- package/dist/internal/index.js +26 -2
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +26 -2
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -435,7 +435,7 @@ var openaiChatLanguageModelOptions = lazySchema2(
       /**
        * Reasoning effort for reasoning models. Defaults to `medium`.
        */
-      reasoningEffort: z3.enum(["minimal", "low", "medium", "high"]).optional(),
+      reasoningEffort: z3.enum(["none", "minimal", "low", "medium", "high"]).optional(),
       /**
        * Maximum number of completion tokens to generate. Useful for reasoning models.
        */
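The only change in this hunk is the new `"none"` value for `reasoningEffort`. A minimal consumer-side sketch, assuming the AI SDK's `generateText`/`providerOptions` call shape and a `gpt-5.1` model id (the option name and enum values come from the schema above):

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// 'none' joins 'minimal' | 'low' | 'medium' | 'high' as an accepted value.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'), // model id assumed; see the model-id hunk further down
  prompt: 'Summarize the release notes.',
  providerOptions: {
    openai: { reasoningEffort: 'none' },
  },
});
```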
@@ -485,6 +485,15 @@ var openaiChatLanguageModelOptions = lazySchema2(
        * Useful for improving cache hit rates and working around automatic caching issues.
        */
       promptCacheKey: z3.string().optional(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
       /**
        * A stable identifier used to help detect users of your application
        * that may be violating OpenAI's usage policies. The IDs should be a
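A hedged sketch of the new `promptCacheRetention` option on the chat path; the call shape and cache key value are assumptions, while the option name, values, and the 5.1-only caveat come from the doc comment above:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

await generateText({
  model: openai.chat('gpt-5.1'), // per the doc comment, '24h' is limited to 5.1 series models
  prompt: 'Answer from the cached system prompt.',
  providerOptions: {
    openai: {
      promptCacheKey: 'support-bot-v1', // hypothetical cache key
      promptCacheRetention: '24h',      // omit to keep the 'in_memory' default
    },
  },
});
```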
@@ -653,6 +662,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
@@ -3157,7 +3167,11 @@ var openaiResponsesReasoningModelIds = [
   "gpt-5-nano",
   "gpt-5-nano-2025-08-07",
   "gpt-5-pro",
-  "gpt-5-pro-2025-10-06"
+  "gpt-5-pro-2025-10-06",
+  "gpt-5.1",
+  "gpt-5.1-chat-latest",
+  "gpt-5.1-codex-mini",
+  "gpt-5.1-codex"
 ];
 var openaiResponsesModelIds = [
   "gpt-4.1",
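The four `gpt-5.1` ids are appended to `openaiResponsesReasoningModelIds`, so the Responses path now treats them as reasoning models. A one-line sketch, assuming the package's `openai.responses` factory:

```ts
import { openai } from '@ai-sdk/openai';

// Newly recognized as a reasoning model on the Responses API path.
const model = openai.responses('gpt-5.1-codex-mini');
```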
@@ -3230,6 +3244,15 @@ var openaiResponsesProviderOptionsSchema = lazySchema16(
       parallelToolCalls: z18.boolean().nullish(),
       previousResponseId: z18.string().nullish(),
       promptCacheKey: z18.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z18.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z18.string().nullish(),
       reasoningSummary: z18.string().nullish(),
       safetyIdentifier: z18.string().nullish(),
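The same option is mirrored on the Responses path (note `.nullish()` here versus `.optional()` on the chat schema). A usage sketch under the same assumptions as above:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Answer from the cached system prompt.',
  providerOptions: {
    // Serialized to prompt_cache_retention in the request body (next hunk).
    openai: { promptCacheRetention: '24h' },
  },
});
```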
@@ -3546,6 +3569,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
@@ -5006,7 +5030,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.57" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.59" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {