@ai-sdk/openai 2.0.6 → 2.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for reference purposes only and reflects the changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -261,7 +261,7 @@ var openaiProviderOptions = z.object({
261
261
  /**
262
262
  * Reasoning effort for reasoning models. Defaults to `medium`.
263
263
  */
264
- reasoningEffort: z.enum(["low", "medium", "high"]).optional(),
264
+ reasoningEffort: z.enum(["minimal", "low", "medium", "high"]).optional(),
265
265
  /**
266
266
  * Maximum number of completion tokens to generate. Useful for reasoning models.
267
267
  */
@@ -298,7 +298,12 @@ var openaiProviderOptions = z.object({
298
298
  *
299
299
  * @default false
300
300
  */
301
- strictJsonSchema: z.boolean().optional()
301
+ strictJsonSchema: z.boolean().optional(),
302
+ /**
303
+ * Controls the verbosity of the model's responses.
304
+ * Lower values will result in more concise responses, while higher values will result in more verbose responses.
305
+ */
306
+ textVerbosity: z.enum(["low", "medium", "high"]).optional()
302
307
  });
303
308
 
304
309
  // src/openai-error.ts
@@ -581,6 +586,7 @@ var OpenAIChatLanguageModel = class {
581
586
  } : { type: "json_object" } : void 0,
582
587
  stop: stopSequences,
583
588
  seed,
589
+ verbosity: openaiOptions.textVerbosity,
584
590
  // openai specific settings:
585
591
  // TODO remove in next major version; we auto-map maxOutputTokens now
586
592
  max_completion_tokens: openaiOptions.maxCompletionTokens,
@@ -666,7 +672,7 @@ var OpenAIChatLanguageModel = class {
666
672
  warnings.push({
667
673
  type: "unsupported-setting",
668
674
  setting: "serviceTier",
669
- details: "flex processing is only available for o3 and o4-mini models"
675
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
670
676
  });
671
677
  baseArgs.service_tier = void 0;
672
678
  }
@@ -674,7 +680,7 @@ var OpenAIChatLanguageModel = class {
674
680
  warnings.push({
675
681
  type: "unsupported-setting",
676
682
  setting: "serviceTier",
677
- details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
683
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
678
684
  });
679
685
  baseArgs.service_tier = void 0;
680
686
  }
@@ -1107,10 +1113,10 @@ function isReasoningModel(modelId) {
1107
1113
  return modelId.startsWith("o") || modelId.startsWith("gpt-5");
1108
1114
  }
1109
1115
  function supportsFlexProcessing(modelId) {
1110
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1116
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
1111
1117
  }
1112
1118
  function supportsPriorityProcessing(modelId) {
1113
- return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1119
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
1114
1120
  }
1115
1121
  function getSystemMessageMode(modelId) {
1116
1122
  var _a, _b;
@@ -2327,15 +2333,20 @@ var OpenAIResponsesLanguageModel = class {
2327
2333
  temperature,
2328
2334
  top_p: topP,
2329
2335
  max_output_tokens: maxOutputTokens,
2330
- ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2336
+ ...((responseFormat == null ? void 0 : responseFormat.type) === "json" || (openaiOptions == null ? void 0 : openaiOptions.textVerbosity)) && {
2331
2337
  text: {
2332
- format: responseFormat.schema != null ? {
2333
- type: "json_schema",
2334
- strict: strictJsonSchema,
2335
- name: (_b = responseFormat.name) != null ? _b : "response",
2336
- description: responseFormat.description,
2337
- schema: responseFormat.schema
2338
- } : { type: "json_object" }
2338
+ ...(responseFormat == null ? void 0 : responseFormat.type) === "json" && {
2339
+ format: responseFormat.schema != null ? {
2340
+ type: "json_schema",
2341
+ strict: strictJsonSchema,
2342
+ name: (_b = responseFormat.name) != null ? _b : "response",
2343
+ description: responseFormat.description,
2344
+ schema: responseFormat.schema
2345
+ } : { type: "json_object" }
2346
+ },
2347
+ ...(openaiOptions == null ? void 0 : openaiOptions.textVerbosity) && {
2348
+ verbosity: openaiOptions.textVerbosity
2349
+ }
2339
2350
  }
2340
2351
  },
2341
2352
  // provider options:
@@ -2399,7 +2410,7 @@ var OpenAIResponsesLanguageModel = class {
2399
2410
  warnings.push({
2400
2411
  type: "unsupported-setting",
2401
2412
  setting: "serviceTier",
2402
- details: "flex processing is only available for o3 and o4-mini models"
2413
+ details: "flex processing is only available for o3, o4-mini, and gpt-5 models"
2403
2414
  });
2404
2415
  delete baseArgs.service_tier;
2405
2416
  }
@@ -2407,7 +2418,7 @@ var OpenAIResponsesLanguageModel = class {
2407
2418
  warnings.push({
2408
2419
  type: "unsupported-setting",
2409
2420
  setting: "serviceTier",
2410
- details: "priority processing is only available for supported models (GPT-4, o3, o4-mini) and requires Enterprise access"
2421
+ details: "priority processing is only available for supported models (gpt-4, gpt-5, gpt-5-mini, o3, o4-mini) and requires Enterprise access. gpt-5-nano is not supported"
2411
2422
  });
2412
2423
  delete baseArgs.service_tier;
2413
2424
  }
@@ -3163,10 +3174,10 @@ function getResponsesModelConfig(modelId) {
3163
3174
  };
3164
3175
  }
3165
3176
  function supportsFlexProcessing2(modelId) {
3166
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3177
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
3167
3178
  }
3168
3179
  function supportsPriorityProcessing2(modelId) {
3169
- return modelId.startsWith("gpt-4") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3180
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
3170
3181
  }
3171
3182
  var openaiResponsesProviderOptionsSchema = z14.object({
3172
3183
  metadata: z14.any().nullish(),
@@ -3179,7 +3190,8 @@ var openaiResponsesProviderOptionsSchema = z14.object({
3179
3190
  instructions: z14.string().nullish(),
3180
3191
  reasoningSummary: z14.string().nullish(),
3181
3192
  serviceTier: z14.enum(["auto", "flex", "priority"]).nullish(),
3182
- include: z14.array(z14.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish()
3193
+ include: z14.array(z14.enum(["reasoning.encrypted_content", "file_search_call.results"])).nullish(),
3194
+ textVerbosity: z14.enum(["low", "medium", "high"]).nullish()
3183
3195
  });
3184
3196
 
3185
3197
  // src/openai-speech-model.ts