@ai-sdk/openai 3.0.0-beta.58 → 3.0.0-beta.59
- package/CHANGELOG.md +6 -0
- package/dist/index.d.mts +2 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.js +21 -1
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +21 -1
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +1 -0
- package/dist/internal/index.d.ts +1 -0
- package/dist/internal/index.js +20 -0
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +20 -0
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dist/index.d.mts
CHANGED
@@ -19,6 +19,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -389,6 +390,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
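The new field is a closed `"in_memory" | "24h"` union at the type level. A quick sketch of what the compiler accepts, assuming `OpenAIChatLanguageModelOptions` is among the package's public exports:

```ts
import type { OpenAIChatLanguageModelOptions } from '@ai-sdk/openai';

// Only the two declared retention values typecheck against the options type.
const ok = {
  promptCacheRetention: '24h',
} satisfies OpenAIChatLanguageModelOptions;

const bad = {
  // @ts-expect-error -- '7d' is not in the "in_memory" | "24h" union
  promptCacheRetention: '7d',
} satisfies OpenAIChatLanguageModelOptions;
```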
package/dist/index.d.ts
CHANGED
@@ -19,6 +19,7 @@ declare const openaiChatLanguageModelOptions: _ai_sdk_provider_utils.LazySchema<
     strictJsonSchema?: boolean | undefined;
     textVerbosity?: "low" | "medium" | "high" | undefined;
     promptCacheKey?: string | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | undefined;
     safetyIdentifier?: string | undefined;
 }>;
 type OpenAIChatLanguageModelOptions = InferSchema<typeof openaiChatLanguageModelOptions>;
@@ -389,6 +390,7 @@ declare const openaiResponsesProviderOptionsSchema: _ai_sdk_provider_utils.LazyS
     parallelToolCalls?: boolean | null | undefined;
     previousResponseId?: string | null | undefined;
     promptCacheKey?: string | null | undefined;
+    promptCacheRetention?: "in_memory" | "24h" | null | undefined;
     reasoningEffort?: string | null | undefined;
     reasoningSummary?: string | null | undefined;
     safetyIdentifier?: string | null | undefined;
package/dist/index.js
CHANGED
@@ -496,6 +496,15 @@ var openaiChatLanguageModelOptions = (0, import_provider_utils4.lazySchema)(
      * Useful for improving cache hit rates and working around automatic caching issues.
      */
     promptCacheKey: import_v43.z.string().optional(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v43.z.enum(["in_memory", "24h"]).optional(),
     /**
      * A stable identifier used to help detect users of your application
      * that may be violating OpenAI's usage policies. The IDs should be a
@@ -662,6 +671,7 @@ var OpenAIChatLanguageModel = class {
       reasoning_effort: openaiOptions.reasoningEffort,
       service_tier: openaiOptions.serviceTier,
       prompt_cache_key: openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions.safetyIdentifier,
       // messages:
       messages
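Taken together, the two chat hunks validate the camelCase `promptCacheRetention` provider option and forward it as the snake_case `prompt_cache_retention` request field. A minimal usage sketch, assuming AI SDK `generateText` and a placeholder 5.1 series model id:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: opt a Chat Completions request into 24-hour prompt cache retention.
// 'gpt-5.1' and the cache key are placeholders; per the schema docs above,
// '24h' retention is currently limited to 5.1 series models.
const { text } = await generateText({
  model: openai.chat('gpt-5.1'),
  prompt: 'Summarize the latest release notes.',
  providerOptions: {
    openai: {
      promptCacheKey: 'release-notes-summary',
      promptCacheRetention: '24h',
    },
  },
});
```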
@@ -3179,6 +3189,15 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchem
     parallelToolCalls: import_v418.z.boolean().nullish(),
     previousResponseId: import_v418.z.string().nullish(),
     promptCacheKey: import_v418.z.string().nullish(),
+    /**
+     * The retention policy for the prompt cache.
+     * - 'in_memory': Default. Standard prompt caching behavior.
+     * - '24h': Extended prompt caching that keeps cached prefixes active for up to 24 hours.
+     *   Currently only available for 5.1 series models.
+     *
+     * @default 'in_memory'
+     */
+    promptCacheRetention: import_v418.z.enum(["in_memory", "24h"]).nullish(),
     reasoningEffort: import_v418.z.string().nullish(),
     reasoningSummary: import_v418.z.string().nullish(),
     safetyIdentifier: import_v418.z.string().nullish(),
@@ -3493,6 +3512,7 @@ var OpenAIResponsesLanguageModel = class {
       service_tier: openaiOptions == null ? void 0 : openaiOptions.serviceTier,
       include,
       prompt_cache_key: openaiOptions == null ? void 0 : openaiOptions.promptCacheKey,
+      prompt_cache_retention: openaiOptions == null ? void 0 : openaiOptions.promptCacheRetention,
       safety_identifier: openaiOptions == null ? void 0 : openaiOptions.safetyIdentifier,
       top_logprobs: topLogprobs,
       truncation: openaiOptions == null ? void 0 : openaiOptions.truncation,
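The Responses API path mirrors the chat change, except its schema is `.nullish()` rather than `.optional()`. A parallel sketch against the responses model, again with a placeholder model id:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Sketch: the same option on the Responses API path; the request body
// carries it as `prompt_cache_retention`, as in the mapping above.
const result = await generateText({
  model: openai.responses('gpt-5.1'),
  prompt: 'Draft a changelog entry for this release.',
  providerOptions: {
    openai: { promptCacheRetention: '24h' },
  },
});
```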
@@ -4941,7 +4961,7 @@ var OpenAITranscriptionModel = class {
 };
 
 // src/version.ts
-var VERSION = true ? "3.0.0-beta.58" : "0.0.0-test";
+var VERSION = true ? "3.0.0-beta.59" : "0.0.0-test";
 
 // src/openai-provider.ts
 function createOpenAI(options = {}) {