@ai-sdk/openai 3.0.0-beta.77 → 3.0.0-beta.78
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/index.js +79 -2
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +79 -2
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.js +78 -1
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +78 -1
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +1 -1
package/CHANGELOG.md
CHANGED
package/dist/index.js
CHANGED
|
@@ -3272,7 +3272,18 @@ var openaiResponsesModelIds = [
|
|
|
3272
3272
|
var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchema)(
|
|
3273
3273
|
() => (0, import_provider_utils24.zodSchema)(
|
|
3274
3274
|
import_v419.z.object({
|
|
3275
|
+
/**
|
|
3276
|
+
* The ID of the OpenAI Conversation to continue.
|
|
3277
|
+
* You must create a conversation first via the OpenAI API.
|
|
3278
|
+
* Cannot be used in conjunction with `previousResponseId`.
|
|
3279
|
+
* Defaults to `undefined`.
|
|
3280
|
+
* @see https://platform.openai.com/docs/api-reference/conversations/create
|
|
3281
|
+
*/
|
|
3275
3282
|
conversation: import_v419.z.string().nullish(),
|
|
3283
|
+
/**
|
|
3284
|
+
* The set of extra fields to include in the response (advanced, usually not needed).
|
|
3285
|
+
* Example values: 'reasoning.encrypted_content', 'file_search_call.results', 'message.output_text.logprobs'.
|
|
3286
|
+
*/
|
|
3276
3287
|
include: import_v419.z.array(
|
|
3277
3288
|
import_v419.z.enum([
|
|
3278
3289
|
"reasoning.encrypted_content",
|
|
@@ -3281,9 +3292,16 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
|
|
|
3281
3292
|
"message.output_text.logprobs"
|
|
3282
3293
|
])
|
|
3283
3294
|
).nullish(),
|
|
3295
|
+
/**
|
|
3296
|
+
* Instructions for the model.
|
|
3297
|
+
* They can be used to change the system or developer message when continuing a conversation using the `previousResponseId` option.
|
|
3298
|
+
* Defaults to `undefined`.
|
|
3299
|
+
*/
|
|
3284
3300
|
instructions: import_v419.z.string().nullish(),
|
|
3285
3301
|
/**
|
|
3286
|
-
* Return the log probabilities of the tokens.
|
|
3302
|
+
* Return the log probabilities of the tokens. Including logprobs will increase
|
|
3303
|
+
* the response size and can slow down response times. However, it can
|
|
3304
|
+
* be useful to better understand how the model is behaving.
|
|
3287
3305
|
*
|
|
3288
3306
|
* Setting to true will return the log probabilities of the tokens that
|
|
3289
3307
|
* were generated.
|
|
@@ -3301,9 +3319,22 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
|
|
|
3301
3319
|
* Any further attempts to call a tool by the model will be ignored.
|
|
3302
3320
|
*/
|
|
3303
3321
|
maxToolCalls: import_v419.z.number().nullish(),
|
|
3322
|
+
/**
|
|
3323
|
+
* Additional metadata to store with the generation.
|
|
3324
|
+
*/
|
|
3304
3325
|
metadata: import_v419.z.any().nullish(),
|
|
3326
|
+
/**
|
|
3327
|
+
* Whether to use parallel tool calls. Defaults to `true`.
|
|
3328
|
+
*/
|
|
3305
3329
|
parallelToolCalls: import_v419.z.boolean().nullish(),
|
|
3330
|
+
/**
|
|
3331
|
+
* The ID of the previous response. You can use it to continue a conversation.
|
|
3332
|
+
* Defaults to `undefined`.
|
|
3333
|
+
*/
|
|
3306
3334
|
previousResponseId: import_v419.z.string().nullish(),
|
|
3335
|
+
/**
|
|
3336
|
+
* Sets a cache key to tie this prompt to cached prefixes for better caching performance.
|
|
3337
|
+
*/
|
|
3307
3338
|
promptCacheKey: import_v419.z.string().nullish(),
|
|
3308
3339
|
/**
|
|
3309
3340
|
* The retention policy for the prompt cache.
|
|
@@ -3314,14 +3345,60 @@ var openaiResponsesProviderOptionsSchema = (0, import_provider_utils24.lazySchem
|
|
|
3314
3345
|
* @default 'in_memory'
|
|
3315
3346
|
*/
|
|
3316
3347
|
promptCacheRetention: import_v419.z.enum(["in_memory", "24h"]).nullish(),
|
|
3348
|
+
/**
|
|
3349
|
+
* Reasoning effort for reasoning models. Defaults to `medium`. If you use
|
|
3350
|
+
* `providerOptions` to set the `reasoningEffort` option, this model setting will be ignored.
|
|
3351
|
+
* Valid values: 'none' | 'minimal' | 'low' | 'medium' | 'high'
|
|
3352
|
+
*
|
|
3353
|
+
* Note: The 'none' type for `reasoningEffort` is only available for OpenAI's GPT-5.1
|
|
3354
|
+
* models. Setting `reasoningEffort` to 'none' with other models will result in
|
|
3355
|
+
* an error.
|
|
3356
|
+
*/
|
|
3317
3357
|
reasoningEffort: import_v419.z.string().nullish(),
|
|
3358
|
+
/**
|
|
3359
|
+
* Controls reasoning summary output from the model.
|
|
3360
|
+
* Set to "auto" to automatically receive the richest level available,
|
|
3361
|
+
* or "detailed" for comprehensive summaries.
|
|
3362
|
+
*/
|
|
3318
3363
|
reasoningSummary: import_v419.z.string().nullish(),
|
|
3364
|
+
/**
|
|
3365
|
+
* The identifier for safety monitoring and tracking.
|
|
3366
|
+
*/
|
|
3319
3367
|
safetyIdentifier: import_v419.z.string().nullish(),
|
|
3368
|
+
/**
|
|
3369
|
+
* Service tier for the request.
|
|
3370
|
+
* Set to 'flex' for 50% cheaper processing at the cost of increased latency (available for o3, o4-mini, and gpt-5 models).
|
|
3371
|
+
* Set to 'priority' for faster processing with Enterprise access (available for gpt-4, gpt-5, gpt-5-mini, o3, o4-mini; gpt-5-nano is not supported).
|
|
3372
|
+
*
|
|
3373
|
+
* Defaults to 'auto'.
|
|
3374
|
+
*/
|
|
3320
3375
|
serviceTier: import_v419.z.enum(["auto", "flex", "priority", "default"]).nullish(),
|
|
3376
|
+
/**
|
|
3377
|
+
* Whether to store the generation. Defaults to `true`.
|
|
3378
|
+
*/
|
|
3321
3379
|
store: import_v419.z.boolean().nullish(),
|
|
3380
|
+
/**
|
|
3381
|
+
* Whether to use strict JSON schema validation.
|
|
3382
|
+
* Defaults to `true`.
|
|
3383
|
+
*/
|
|
3322
3384
|
strictJsonSchema: import_v419.z.boolean().nullish(),
|
|
3385
|
+
/**
|
|
3386
|
+
* Controls the verbosity of the model's responses. Lower values ('low') will result
|
|
3387
|
+
* in more concise responses, while higher values ('high') will result in more verbose responses.
|
|
3388
|
+
* Valid values: 'low', 'medium', 'high'.
|
|
3389
|
+
*/
|
|
3323
3390
|
textVerbosity: import_v419.z.enum(["low", "medium", "high"]).nullish(),
|
|
3391
|
+
/**
|
|
3392
|
+
* Controls output truncation. 'auto' (default) performs truncation automatically;
|
|
3393
|
+
* 'disabled' turns truncation off.
|
|
3394
|
+
*/
|
|
3324
3395
|
truncation: import_v419.z.enum(["auto", "disabled"]).nullish(),
|
|
3396
|
+
/**
|
|
3397
|
+
* A unique identifier representing your end-user, which can help OpenAI to
|
|
3398
|
+
* monitor and detect abuse.
|
|
3399
|
+
* Defaults to `undefined`.
|
|
3400
|
+
* @see https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
|
|
3401
|
+
*/
|
|
3325
3402
|
user: import_v419.z.string().nullish()
|
|
3326
3403
|
})
|
|
3327
3404
|
)
|
|
@@ -5180,7 +5257,7 @@ var OpenAITranscriptionModel = class {
|
|
|
5180
5257
|
};
|
|
5181
5258
|
|
|
5182
5259
|
// src/version.ts
|
|
5183
|
-
var VERSION = true ? "3.0.0-beta.77" : "0.0.0-test";
|
|
5260
|
+
var VERSION = true ? "3.0.0-beta.78" : "0.0.0-test";
|
|
5184
5261
|
|
|
5185
5262
|
// src/openai-provider.ts
|
|
5186
5263
|
function createOpenAI(options = {}) {
|