@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.60
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +30 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +29 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/dist/index.mjs
CHANGED
@@ -485,6 +485,15 @@ var openaiChatLanguageModelOptions = lazySchema2(
    * Useful for improving cache hit rates and working around automatic caching issues.
    */
   promptCacheKey: z3.string().optional(),
+  /**
+   * The retention policy for the prompt cache.
+   * - 'in_memory': Default. Standard prompt caching behavior.
+   * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+   * Currently only available for 5.1 series models.
+   *
+   * @default 'in_memory'
+   */
+  promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
   /**
    * A stable identifier used to help detect users of your application
    * that may be violating OpenAI's usage policies. The IDs should be a
@@ -653,6 +662,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
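For context, a minimal usage sketch of the new chat option via the AI SDK's generateText (the model id and cache key are illustrative; the schema docs above only say "5.1 series models"):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Hypothetical model id; pick any model that supports extended caching.
const result = await generateText({
  model: openai('gpt-5.1'),
  prompt: 'Summarize this document.',
  providerOptions: {
    openai: {
      // Illustrative cache key for stable prefix routing.
      promptCacheKey: 'contract-summarizer-v1',
      // New in this release: keep cached prefixes active for up to 24 hours.
      promptCacheRetention: '24h',
    },
  },
});
console.log(result.text);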
@@ -3202,6 +3212,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazySchema16(
   () => zodSchema16(
     z18.object({
+      conversation: z18.string().nullish(),
       include: z18.array(
         z18.enum([
           "reasoning.encrypted_content",
@@ -3234,6 +3245,15 @@ var openaiResponsesProviderOptionsSchema = lazySchema16(
       parallelToolCalls: z18.boolean().nullish(),
       previousResponseId: z18.string().nullish(),
       promptCacheKey: z18.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       * Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z18.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z18.string().nullish(),
       reasoningSummary: z18.string().nullish(),
       safetyIdentifier: z18.string().nullish(),
@@ -3478,6 +3498,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
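A sketch of the new conversation option on the Responses model, matching the guard above (model id and conversation id are illustrative):

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5.1'), // hypothetical model id
  prompt: 'Continue where we left off.',
  providerOptions: {
    openai: {
      // Illustrative id: attaches the request to a stored OpenAI
      // conversation instead of chaining from a single response.
      conversation: 'conv_abc123',
      // Leave previousResponseId unset; setting both triggers the
      // "unsupported-setting" warning added above.
    },
  },
});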
@@ -3540,6 +3567,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3550,6 +3578,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
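Both new camelCase provider options are mapped to snake_case fields on the wire, so the outgoing Responses API request body carries them roughly like this (abbreviated sketch; values are illustrative):

// Rough shape of the request body after the mapping above.
const requestBody = {
  model: 'gpt-5.1',
  conversation: 'conv_abc123',
  prompt_cache_key: 'contract-summarizer-v1',
  prompt_cache_retention: '24h',
  // input, tools, and the remaining fields omitted
};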
@@ -5010,7 +5039,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.58" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.60" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {