@ai-sdk/openai 2.0.11 → 2.0.12

package/CHANGELOG.md CHANGED
@@ -1,5 +1,15 @@
  # @ai-sdk/openai
 
+ ## 2.0.12
+
+ ### Patch Changes
+
+ - ec336a1: feat(provider/openai): add response_format to be supported by default
+ - 2935ec7: fix(provider/openai): exclude gpt-5-chat from reasoning model
+ - Updated dependencies [034e229]
+ - Updated dependencies [f25040d]
+   - @ai-sdk/provider-utils@3.0.3
+
  ## 2.0.11
 
  ### Patch Changes
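The two patch entries map onto the code changes below: transcription now always requests `verbose_json` (so results carry segment timestamps), and `gpt-5-chat` model ids are no longer routed through the reasoning-model paths. A minimal sketch of the transcription effect, assuming the AI SDK's `experimental_transcribe` entry point and a `whisper-1` model id (illustrative, not taken from this diff):

```js
import { readFile } from "node:fs/promises";
import { experimental_transcribe as transcribe } from "ai";
import { openai } from "@ai-sdk/openai";

const result = await transcribe({
  model: openai.transcription("whisper-1"),
  audio: await readFile("meeting.wav"),
});

// As of 2.0.12, `segments` is populated from verbose_json output rather
// than only from word-level timestamps.
for (const { startSecond, endSecond, text } of result.segments) {
  console.log(`${startSecond}s-${endSecond}s:${text}`);
}
```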
package/dist/index.js CHANGED
@@ -1135,13 +1135,13 @@ var openaiChatChunkSchema = import_v45.z.union([
  openaiErrorDataSchema
  ]);
  function isReasoningModel(modelId) {
- return modelId.startsWith("o") || modelId.startsWith("gpt-5");
+ return (modelId.startsWith("o") || modelId.startsWith("gpt-5")) && !modelId.startsWith("gpt-5-chat");
  }
  function supportsFlexProcessing(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  }
  function supportsPriorityProcessing(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
  function getSystemMessageMode(modelId) {
  var _a, _b;
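`isReasoningModel` wraps its two prefix checks in parentheses so the new `gpt-5-chat` exclusion covers both, while in `supportsFlexProcessing` and `supportsPriorityProcessing` the guard relies on `&&` binding tighter than `||`, attaching only to the `gpt-5` term. A sketch of how the patched predicates classify a few ids (illustrative calls against the internal helpers above):

```js
isReasoningModel("o4-mini");                     // true  (starts with "o")
isReasoningModel("gpt-5-mini");                  // true  (starts with "gpt-5")
isReasoningModel("gpt-5-chat-latest");           // false (new exclusion)
supportsFlexProcessing("gpt-5-chat-latest");     // false
supportsPriorityProcessing("gpt-5-chat-latest"); // false
supportsPriorityProcessing("gpt-5-nano");        // false (pre-existing exclusion)
```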
@@ -3089,6 +3089,13 @@ function isErrorChunk(chunk) {
  return chunk.type === "error";
  }
  function getResponsesModelConfig(modelId) {
+ if (modelId.startsWith("gpt-5-chat")) {
+ return {
+ isReasoningModel: false,
+ systemMessageMode: "system",
+ requiredAutoTruncation: false
+ };
+ }
  if (modelId.startsWith("o") || modelId.startsWith("gpt-5") || modelId.startsWith("codex-") || modelId.startsWith("computer-use")) {
  if (modelId.startsWith("o1-mini") || modelId.startsWith("o1-preview")) {
  return {
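The early return gives chat-tuned `gpt-5` variants a plain non-reasoning configuration before the generic `gpt-5` branch can claim them. Observable behavior, roughly (the `-latest` id is illustrative):

```js
getResponsesModelConfig("gpt-5-chat-latest");
// => { isReasoningModel: false, systemMessageMode: "system", requiredAutoTruncation: false }

getResponsesModelConfig("gpt-5");
// falls through to the reasoning-model branch below, as before
```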
@@ -3110,10 +3117,10 @@ function getResponsesModelConfig(modelId) {
  };
  }
  function supportsFlexProcessing2(modelId) {
- return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5");
+ return modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
  }
  function supportsPriorityProcessing2(modelId) {
- return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
+ return modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
  }
  var openaiResponsesProviderOptionsSchema = import_v413.z.object({
  metadata: import_v413.z.any().nullish(),
@@ -3359,6 +3366,8 @@ var OpenAITranscriptionModel = class {
  include: openAIOptions.include,
  language: openAIOptions.language,
  prompt: openAIOptions.prompt,
+ response_format: "verbose_json",
+ // always use verbose_json to get segments
  temperature: openAIOptions.temperature,
  timestamp_granularities: openAIOptions.timestampGranularities
  };
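Pinning `response_format` matters because OpenAI's `/v1/audio/transcriptions` endpoint only includes a `segments` array when `verbose_json` is requested. A standalone sketch of the equivalent raw request (direct API use for illustration, not code from this package):

```js
// `audioBytes` is assumed to hold the raw audio file contents.
const form = new FormData();
form.append("file", new Blob([audioBytes]), "audio.wav");
form.append("model", "whisper-1");
form.append("response_format", "verbose_json"); // required for `segments`

const res = await fetch("https://api.openai.com/v1/audio/transcriptions", {
  method: "POST",
  headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` },
  body: form,
});
const { text, segments } = await res.json();
```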
@@ -3374,7 +3383,7 @@ var OpenAITranscriptionModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g, _h;
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
  const { formData, warnings } = await this.getArgs(options);
  const {
@@ -3398,13 +3407,17 @@ var OpenAITranscriptionModel = class {
  const language = response.language != null && response.language in languageMap ? languageMap[response.language] : void 0;
  return {
  text: response.text,
- segments: (_e = (_d = response.words) == null ? void 0 : _d.map((word) => ({
+ segments: (_g = (_f = (_d = response.segments) == null ? void 0 : _d.map((segment) => ({
+ text: segment.text,
+ startSecond: segment.start,
+ endSecond: segment.end
+ }))) != null ? _f : (_e = response.words) == null ? void 0 : _e.map((word) => ({
  text: word.word,
  startSecond: word.start,
  endSecond: word.end
- }))) != null ? _e : [],
+ }))) != null ? _g : [],
  language,
- durationInSeconds: (_f = response.duration) != null ? _f : void 0,
+ durationInSeconds: (_h = response.duration) != null ? _h : void 0,
  warnings,
  response: {
  timestamp: currentDate,
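The minified temporaries (`_d` through `_h`) obscure the control flow. De-minified, the new mapping prefers `verbose_json` segments and keeps the old word-timestamp path as a fallback (a readable restatement, not the shipped code):

```js
const segments =
  response.segments?.map((s) => ({
    text: s.text,
    startSecond: s.start,
    endSecond: s.end,
  })) ??
  response.words?.map((w) => ({
    text: w.word,
    startSecond: w.start,
    endSecond: w.end,
  })) ??
  [];
```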
@@ -3425,6 +3438,20 @@ var openaiTranscriptionResponseSchema = import_v416.z.object({
  start: import_v416.z.number(),
  end: import_v416.z.number()
  })
+ ).nullish(),
+ segments: import_v416.z.array(
+ import_v416.z.object({
+ id: import_v416.z.number(),
+ seek: import_v416.z.number(),
+ start: import_v416.z.number(),
+ end: import_v416.z.number(),
+ text: import_v416.z.string(),
+ tokens: import_v416.z.array(import_v416.z.number()),
+ temperature: import_v416.z.number(),
+ avg_logprob: import_v416.z.number(),
+ compression_ratio: import_v416.z.number(),
+ no_speech_prob: import_v416.z.number()
+ })
  ).nullish()
  });
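For reference, an object of the shape the extended schema accepts per segment; the values are illustrative, mirroring what Whisper's `verbose_json` format returns:

```js
const exampleSegment = {
  id: 0,
  seek: 0,
  start: 0.0,
  end: 3.2,
  text: " Hello there.",
  tokens: [50364, 2425, 456, 13],
  temperature: 0,
  avg_logprob: -0.25,
  compression_ratio: 1.05,
  no_speech_prob: 0.01,
};
```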