@ai-sdk/openai 2.0.6 → 2.0.8

This diff shows the published contents of these package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -9,6 +9,7 @@ declare const openaiProviderOptions: z.ZodObject<{
     parallelToolCalls: z.ZodOptional<z.ZodBoolean>;
     user: z.ZodOptional<z.ZodString>;
     reasoningEffort: z.ZodOptional<z.ZodEnum<{
+        minimal: "minimal";
         low: "low";
         medium: "medium";
         high: "high";
@@ -24,6 +25,11 @@ declare const openaiProviderOptions: z.ZodObject<{
         priority: "priority";
     }>>;
     strictJsonSchema: z.ZodOptional<z.ZodBoolean>;
+    textVerbosity: z.ZodOptional<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>;
 }, z.core.$strip>;
 type OpenAIProviderOptions = z.infer<typeof openaiProviderOptions>;
 
@@ -218,6 +224,11 @@ declare const openaiResponsesProviderOptionsSchema: z.ZodObject<{
         "reasoning.encrypted_content": "reasoning.encrypted_content";
         "file_search_call.results": "file_search_call.results";
     }>>>>;
+    textVerbosity: z.ZodOptional<z.ZodNullable<z.ZodEnum<{
+        low: "low";
+        medium: "medium";
+        high: "high";
+    }>>>;
 }, z.core.$strip>;
 type OpenAIResponsesProviderOptions = z.infer<typeof openaiResponsesProviderOptionsSchema>;
 
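Net effect on the public typings: the chat options' reasoningEffort enum gains "minimal", and both option objects gain a textVerbosity field (optional on the chat options, optional and nullable on the responses options). A restatement of the inferred fields for reference; the type aliases are mine, not part of the generated declarations:

// Illustrative only; derived from the schemas above.
type ReasoningEffort = "minimal" | "low" | "medium" | "high"; // "minimal" is new in this diff
type TextVerbosity = "low" | "medium" | "high";               // new field in this diff

// OpenAIProviderOptions:          textVerbosity?: TextVerbosity
// OpenAIResponsesProviderOptions: textVerbosity?: TextVerbosity | null | undefined
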
@@ -281,7 +281,7 @@ var openaiProviderOptions = import_v4.z.object({
   /**
    * Reasoning effort for reasoning models. Defaults to `medium`.
    */
-  reasoningEffort: import_v4.z.enum(["low", "medium", "high"]).optional(),
+  reasoningEffort: import_v4.z.enum(["minimal", "low", "medium", "high"]).optional(),
   /**
    * Maximum number of completion tokens to generate. Useful for reasoning models.
    */
@@ -318,7 +318,12 @@ var openaiProviderOptions = import_v4.z.object({
    *
    * @default false
    */
-  strictJsonSchema: import_v4.z.boolean().optional()
+  strictJsonSchema: import_v4.z.boolean().optional(),
+  /**
+   * Controls the verbosity of the model's responses.
+   * Lower values will result in more concise responses, while higher values will result in more verbose responses.
+   */
+  textVerbosity: import_v4.z.enum(["low", "medium", "high"]).optional()
 });
 
 // src/openai-error.ts
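
Both new chat options are passed through providerOptions.openai. A minimal usage sketch, assuming generateText from the ai package (the model id and prompt are placeholders of mine):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai("gpt-5"),
  prompt: "Summarize the attached report.",
  providerOptions: {
    openai: {
      reasoningEffort: "minimal", // newly accepted enum value
      textVerbosity: "low",       // forwarded as `verbosity` in the request body (next hunk)
    },
  },
});
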
@@ -599,6 +604,7 @@ var OpenAIChatLanguageModel = class {
        } : { type: "json_object" } : void 0,
        stop: stopSequences,
        seed,
+        verbosity: openaiOptions.textVerbosity,
        // openai specific settings:
        // TODO remove in next major version; we auto-map maxOutputTokens now
        max_completion_tokens: openaiOptions.maxCompletionTokens,
@@ -684,7 +690,7 @@ var OpenAIChatLanguageModel = class {
        warnings.push({
          type: "unsupported-setting",
          setting: "serviceTier",
-          details: "flex processing is only available for o3 and o4-mini models"
+          details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
        });
        baseArgs.service_tier = void 0;
      }
@@ -692,7 +698,7 @@ var OpenAIChatLanguageModel = class {
        warnings.push({
          type: "unsupported-setting",
          setting: "serviceTier",
-          details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+          details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
        });
        baseArgs.service_tier = void 0;
      }
@@ -1125,10 +1131,10 @@ function isReasoningModel(modelId) {
   return modelId.startsWith("o") || modelId.startsWith("gpt-5");
 }
 function supportsFlexProcessing(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
 }
 function supportsPriorityProcessing(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 function getSystemMessageMode(modelId) {
   var _a, _b;
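
A subtlety in the updated supportsPriorityProcessing: && binds tighter than ||, so the gpt-5-nano exclusion applies only to the bare gpt-5 clause. A restatement with explicit grouping (the function name and example calls are mine):

function supportsPriorityProcessingExplicit(modelId: string): boolean {
  return (
    modelId.startsWith("gpt-4") ||
    modelId.startsWith("gpt-5-mini") ||
    (modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano")) ||
    modelId.startsWith("o3") ||
    modelId.startsWith("o4-mini")
  );
}

// supportsPriorityProcessingExplicit("gpt-5")      -> true
// supportsPriorityProcessingExplicit("gpt-5-mini") -> true
// supportsPriorityProcessingExplicit("gpt-5-nano") -> false

Note that the dedicated gpt-5-mini clause is technically redundant ("gpt-5-mini" already satisfies the grouped gpt-5 clause), but it is harmless.
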
@@ -2407,15 +2413,20 @@ var OpenAIResponsesLanguageModel = class {
        temperature,
        top_p: topP,
        max_output_tokens: maxOutputTokens,
-        ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+        ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
          text: {
-            format: responseFormat.schema != null ? {
-              type: "json_schema",
-              strict: strictJsonSchema,
-              name: (_b = responseFormat.name) != null ? _b : "response",
-              description: responseFormat.description,
-              schema: responseFormat.schema
-            } : { type: "json_object" }
+            ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
+              format: responseFormat.schema != null ? {
+                type: "json_schema",
+                strict: strictJsonSchema,
+                name: (_b = responseFormat.name) != null ? _b : "response",
+                description: responseFormat.description,
+                schema: responseFormat.schema
+              } : { type: "json_object" }
+            },
+            ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
+              verbosity: openaiOptions.textVerbosity
+            }
          }
        },
        // provider options:
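
After this restructuring, the Responses API text object is sent whenever a JSON response format or textVerbosity is present, and its two sub-fields compose independently. The resulting request shapes, derived from the spread conditions above (values are illustrative):

// JSON format without a schema, plus textVerbosity "low":
//   text: { format: { type: "json_object" }, verbosity: "low" }
// textVerbosity "high" alone:
//   text: { verbosity: "high" }
// JSON format with a schema, no textVerbosity:
//   text: { format: { type: "json_schema", strict: ..., name: ..., description: ..., schema: ... } }
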
@@ -2479,7 +2490,7 @@ var OpenAIResponsesLanguageModel = class {
        warnings.push({
          type: "unsupported-setting",
          setting: "serviceTier",
-          details: "flex processing is only available for o3 and o4-mini models"
+          details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
        });
        delete baseArgs.service_tier;
      }
@@ -2487,7 +2498,7 @@ var OpenAIResponsesLanguageModel = class {
        warnings.push({
          type: "unsupported-setting",
          setting: "serviceTier",
-          details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
+          details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
        });
        delete baseArgs.service_tier;
      }
@@ -3243,10 +3254,10 @@ function getResponsesModelConfig(modelId) {
   };
 }
 function supportsFlexProcessing2(modelId) {
-  return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
 }
 function supportsPriorityProcessing2(modelId) {
-  return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+  return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
 }
 var openaiResponsesProviderOptionsSchema = import_v415.z.object({
   metadata: import_v415.z.any().nullish(),
@@ -3259,7 +3270,8 @@ var openaiResponsesProviderOptionsSchema = import_v415.z.object({
   instructions: import_v415.z.string().nullish(),
   reasoningSummary: import_v415.z.string().nullish(),
   serviceTier: import_v415.z.enum(["auto", "flex", "priority"]).nullish(),
-  include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
+  include: import_v415.z.array(import_v415.z.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
+  textVerbosity: import_v415.z.enum(["low", "medium", "high"]).nullish()
 });
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
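
The Responses API models expose the same option (nullable as well as optional, per the schema above). A usage sketch, assuming openai.responses and generateText from the ai package (the model id and prompt are mine):

import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { text } = await generateText({
  model: openai.responses("gpt-5"),
  prompt: "Explain the trade-offs in one paragraph.",
  providerOptions: {
    openai: {
      textVerbosity: "high", // forwarded as text.verbosity in the request
      serviceTier: "flex",   // now accepted for gpt-5 models per the checks above
    },
  },
});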