@ai-sdk/openai 3.0.0-beta.77 → 3.0.0-beta.78
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.js +79 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +79 -2
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +78 -1
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +78 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/dist/internal/index.js
CHANGED
@@ -3251,7 +3251,18 @@ var openaiResponsesModelIds = [
 var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchema)(
   () => (0, import_provider_utils23.zodSchema)(
     import_v416.z.object({
+      /**
+       * The ID of the OpenAI Conversation to continue.
+       * You must create a conversation first via the OpenAI API.
+       * Cannot be used in conjunction with `previousResponseId`.
+       * Defaults to `undefined`.
+       * @see https://platform.openai.com/docs/api-reference/conversations/create
+       */
       conversation: import_v416.z.string().nullish(),
+      /**
+       * The set of extra fields to include in the response (advanced, usually not needed).
+       * Example values: 'reasoning.encrypted_content', 'file_search_call.results', 'message.output_text.logprobs'.
+       */
       include: import_v416.z.array(
         import_v416.z.enum([
           "reasoning.encrypted_content",
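This hunk documents the `conversation` provider option. A minimal usage sketch, assuming the AI SDK's `generateText` and the provider's `openai.responses()` factory; the model ID and conversation ID below are placeholders, not values from this diff:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

// Continue an existing OpenAI Conversation. The conversation must be
// created first via the Conversations API; 'conv_123' is a placeholder.
const result = await generateText({
  model: openai.responses('gpt-5'), // placeholder model id
  prompt: 'Pick up where we left off.',
  providerOptions: {
    openai: {
      conversation: 'conv_123', // must not be combined with previousResponseId
    },
  },
});

console.log(result.text);
```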
@@ -3260,9 +3271,16 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchem
           "message.output_text.logprobs"
         ])
       ).nullish(),
+      /**
+       * Instructions for the model.
+       * They can be used to change the system or developer message when continuing a conversation using the `previousResponseId` option.
+       * Defaults to `undefined`.
+       */
       instructions: import_v416.z.string().nullish(),
       /**
-       * Return the log probabilities of the tokens.
+       * Return the log probabilities of the tokens. Including logprobs will increase
+       * the response size and can slow down response times. However, it can
+       * be useful to better understand how the model is behaving.
        *
        * Setting to true will return the log probabilities of the tokens that
        * were generated.
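The new `instructions` doc comment notes that it can swap the system or developer message when continuing from a stored response. A hedged sketch under the same assumptions as above; the response ID is a placeholder, and the `include` value comes from the enum in the previous hunk:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const followUp = await generateText({
  model: openai.responses('gpt-5'), // placeholder model id
  prompt: 'Now list the open questions.',
  providerOptions: {
    openai: {
      previousResponseId: 'resp_123', // placeholder: id of a stored prior response
      instructions: 'Answer as a terse staff engineer.', // replaces the system/developer message
      include: ['message.output_text.logprobs'], // opt into logprobs (larger, slower responses)
    },
  },
});
```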
@@ -3280,9 +3298,22 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchem
        * Any further attempts to call a tool by the model will be ignored.
        */
       maxToolCalls: import_v416.z.number().nullish(),
+      /**
+       * Additional metadata to store with the generation.
+       */
       metadata: import_v416.z.any().nullish(),
+      /**
+       * Whether to use parallel tool calls. Defaults to `true`.
+       */
       parallelToolCalls: import_v416.z.boolean().nullish(),
+      /**
+       * The ID of the previous response. You can use it to continue a conversation.
+       * Defaults to `undefined`.
+       */
       previousResponseId: import_v416.z.string().nullish(),
+      /**
+       * Sets a cache key to tie this prompt to cached prefixes for better caching performance.
+       */
       promptCacheKey: import_v416.z.string().nullish(),
       /**
        * The retention policy for the prompt cache.
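These options tune tool calling, stored metadata, and prompt caching. A small sketch, again with placeholder values:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5'), // placeholder model id
  prompt: 'Triage the attached bug reports.',
  providerOptions: {
    openai: {
      metadata: { ticket: 'OPS-42' },     // arbitrary metadata stored with the generation
      parallelToolCalls: false,           // force sequential tool calls
      promptCacheKey: 'triage-prompt-v1', // reuse cached prompt prefixes across requests
    },
  },
});
```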
@@ -3293,14 +3324,60 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils23.lazySchem
        * @default 'in_memory'
        */
       promptCacheRetention: import_v416.z.enum(["in_memory", "24h"]).nullish(),
+      /**
+       * Reasoning effort for reasoning models. Defaults to `medium`. If you use
+       * `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored.
+       * Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high'
+       *
+       * Note: The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
+       * models. Setting `reasoningEffort` to 'none' with other models will result in
+       * an error.
+       */
       reasoningEffort: import_v416.z.string().nullish(),
+      /**
+       * Controls reasoning summary output from the model.
+       * Set to "auto" to automatically receive the richest level available,
+       * or "detailed" for comprehensive summaries.
+       */
       reasoningSummary: import_v416.z.string().nullish(),
+      /**
+       * The identifier for safety monitoring and tracking.
+       */
       safetyIdentifier: import_v416.z.string().nullish(),
+      /**
+       * Service tier for the request.
+       * Set to 'flex' for 50% cheaper processing at the cost of increased latency (available for o3, o4-mini, and gpt-5 models).
+       * Set to 'priority' for faster processing with Enterprise access (available for gpt-4, gpt-5, gpt-5-mini, o3, o4-mini; gpt-5-nano is not supported).
+       *
+       * Defaults to 'auto'.
+       */
       serviceTier: import_v416.z.enum(["auto", "flex", "priority", "default"]).nullish(),
+      /**
+       * Whether to store the generation. Defaults to `true`.
+       */
       store: import_v416.z.boolean().nullish(),
+      /**
+       * Whether to use strict JSON schema validation.
+       * Defaults to `true`.
+       */
       strictJsonSchema: import_v416.z.boolean().nullish(),
+      /**
+       * Controls the verbosity of the model's responses. Lower values ('low') will result
+       * in more concise responses, while higher values ('high') will result in more verbose responses.
+       * Valid values: 'low', 'medium', 'high'.
+       */
       textVerbosity: import_v416.z.enum(["low", "medium", "high"]).nullish(),
+      /**
+       * Controls output truncation. 'auto' (default) performs truncation automatically;
+       * 'disabled' turns truncation off.
+       */
       truncation: import_v416.z.enum(["auto", "disabled"]).nullish(),
+      /**
+       * A unique identifier representing your end-user, which can help OpenAI to
+       * monitor and detect abuse.
+       * Defaults to `undefined`.
+       * @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
+       */
       user: import_v416.z.string().nullish()
     })
   )
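Taken together, the newly documented tuning options can be combined in a single call. A hedged sketch under the same assumptions; the model ID is a placeholder, and per the doc comments above, a `reasoningEffort` of 'none' is only valid on GPT-5.1 models:

```ts
import { generateText } from 'ai';
import { openai } from '@ai-sdk/openai';

const result = await generateText({
  model: openai.responses('gpt-5.1'), // placeholder model id
  prompt: 'Draft a short release note.',
  providerOptions: {
    openai: {
      reasoningEffort: 'low',   // 'none' | 'minimal' | 'low' | 'medium' | 'high'
      reasoningSummary: 'auto', // richest available summary level
      serviceTier: 'flex',      // cheaper processing, higher latency
      textVerbosity: 'low',     // more concise output
      truncation: 'auto',       // let the API truncate automatically
      store: false,             // do not persist this generation
      user: 'user-1234',        // stable end-user id for abuse monitoring
    },
  },
});
```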