@ai-sdk/openai 2.0.66 → 2.0.68
- package/CHANGELOG.md +12 -0
- package/dist/index.d.mts +3 -0
- package/dist/index.d.ts +3 -0
- package/dist/index.js +30 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +30 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +29 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +29 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/index.mjs
CHANGED
@@ -478,6 +478,15 @@ var openaiChatLanguageModelOptions = lazyValidator2(
        * Useful for improving cache hit rates and working around automatic caching issues.
        */
       promptCacheKey: z3.string().optional(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z3.enum(["in_memory", "24h"]).optional(),
       /**
        * A stable identifier used to help detect users of your application
        * that may be violating OpenAI's usage policies. The IDs should be a
@@ -646,6 +655,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
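Both chat hunks above land the same feature: a new promptCacheRetention provider option that is validated by the zod schema and then forwarded to the OpenAI API as prompt_cache_retention. Below is a minimal sketch of setting it from application code, assuming the AI SDK's generateText and the providerOptions.openai channel; the model id, prompt, and cache key are illustrative:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  // illustrative model id; per the schema docs above, '24h' retention is
  // currently limited to 5.1 series models
  model: openai('gpt-5.1'),
  prompt: 'Summarize the design document below. ...',
  providerOptions: {
    openai: {
      // stable key so repeated requests sharing a long prefix hit the same cache entry
      promptCacheKey: 'design-doc-summarizer-v1',
      // keep the cached prefix alive for up to 24 hours instead of the default 'in_memory'
      promptCacheRetention: '24h',
    },
  },
});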
@@ -2959,6 +2969,7 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = lazyValidator9(
   () => zodSchema15(
     z17.object({
+      conversation: z17.string().nullish(),
       include: z17.array(
         z17.enum([
           "reasoning.encrypted_content",
@@ -2991,6 +3002,15 @@ var openaiResponsesProviderOptionsSchema = lazyValidator9(
       parallelToolCalls: z17.boolean().nullish(),
       previousResponseId: z17.string().nullish(),
       promptCacheKey: z17.string().nullish(),
+      /**
+       * The retention policy for the prompt cache.
+       * - 'in_memory': Default. Standard prompt caching behavior.
+       * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+       *   Currently only available for 5.1 series models.
+       *
+       * @default 'in_memory'
+       */
+      promptCacheRetention: z17.enum(["in_memory", "24h"]).nullish(),
       reasoningEffort: z17.string().nullish(),
       reasoningSummary: z17.string().nullish(),
       safetyIdentifier: z17.string().nullish(),
@@ -3204,6 +3224,13 @@ var OpenAIResponsesLanguageModel = class {
       providerOptions,
       schema: openaiResponsesProviderOptionsSchema
     });
+    if ((openaiOptions == null ? void 0 : openaiOptions.conversation) && (openaiOptions == null ? void 0 : openaiOptions.previousResponseId)) {
+      warnings.push({
+        type: "unsupported-setting",
+        setting: "conversation",
+        details: "conversation and previousResponseId cannot be used together"
+      });
+    }
     const { input, warnings: inputWarnings } = await convertToOpenAIResponsesInput({
       prompt,
       systemMessageMode: modelConfig.systemMessageMode,
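The new guard surfaces a compatibility constraint: conversation attaches the call to a server-side conversation object, previousResponseId chains off one specific prior response, and the Responses API accepts only one of the two, so supplying both produces an 'unsupported-setting' warning. A hedged sketch of the intended single-option usage, assuming a conversation created beforehand through the OpenAI API; the model and conversation ids are illustrative placeholders:

import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const { text } = await generateText({
  model: openai.responses('gpt-5.1'), // illustrative model id
  prompt: 'Pick up where we left off and draft the next section.',
  providerOptions: {
    openai: {
      // illustrative id of a previously created server-side conversation;
      // setting previousResponseId alongside this would trigger the
      // 'unsupported-setting' warning added in the hunk above
      conversation: 'conv_abc123',
    },
  },
});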
@@ -3266,6 +3293,7 @@ var OpenAIResponsesLanguageModel = class {
         }
       },
       // provider options:
+      conversation: openaiOptions == null ? void 0 : openaiOptions.conversation,
       max_tool_calls: openaiOptions == null ? void 0 : openaiOptions.maxToolCalls,
       metadata: openaiOptions == null ? void 0 : openaiOptions.metadata,
       parallel_tool_calls: openaiOptions == null ? void 0 : openaiOptions.parallelToolCalls,
@@ -3276,6 +3304,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
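This gives the Responses model the same cache-retention plumbing as the chat model: the promptCacheRetention value validated in the schema hunk above is serialized onto the request as prompt_cache_retention, so the earlier generateText sketch should carry over unchanged when the model is constructed with openai.responses(...).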
@@ -4555,7 +4584,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "2.0.66" : "0.0.0-test";
+var VERSION = true ? "2.0.68" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {