@ai-sdk/openai 3.0.47 → 3.0.49

This diff shows the changes between publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
@@ -34,7 +34,7 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
34
34
  // src/openai-language-model-capabilities.ts
35
35
  function getOpenAILanguageModelCapabilities(modelId) {
36
36
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
37
- const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
37
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") && !modelId.startsWith("gpt-5.4-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
38
38
  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
39
39
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2") || modelId.startsWith("gpt-5.3") || modelId.startsWith("gpt-5.4");
40
40
  const systemMessageMode = isReasoningModel ? "developer" : "system";
@@ -3102,6 +3102,11 @@ async function convertToOpenAIResponsesInput({
3102
3102
  filename: (_a2 = item.filename) != null ? _a2 : "data",
3103
3103
  file_data: `data:${item.mediaType};base64,${item.data}`
3104
3104
  };
3105
+ case "file-url":
3106
+ return {
3107
+ type: "input_file",
3108
+ file_url: item.url
3109
+ };
3105
3110
  default:
3106
3111
  warnings.push({
3107
3112
  type: "other",
@@ -3160,6 +3165,12 @@ async function convertToOpenAIResponsesInput({
3160
3165
  file_data: `data:${item.mediaType};base64,${item.data}`
3161
3166
  };
3162
3167
  }
3168
+ case "file-url": {
3169
+ return {
3170
+ type: "input_file",
3171
+ file_url: item.url
3172
+ };
3173
+ }
3163
3174
  default: {
3164
3175
  warnings.push({
3165
3176
  type: "other",
@@ -3267,6 +3278,23 @@ var openaiResponsesChunkSchema = lazySchema15(
3267
3278
  service_tier: z17.string().nullish()
3268
3279
  })
3269
3280
  }),
3281
+ z17.object({
3282
+ type: z17.literal("response.failed"),
3283
+ response: z17.object({
3284
+ error: z17.object({
3285
+ code: z17.string().nullish(),
3286
+ message: z17.string()
3287
+ }).nullish(),
3288
+ incomplete_details: z17.object({ reason: z17.string() }).nullish(),
3289
+ usage: z17.object({
3290
+ input_tokens: z17.number(),
3291
+ input_tokens_details: z17.object({ cached_tokens: z17.number().nullish() }).nullish(),
3292
+ output_tokens: z17.number(),
3293
+ output_tokens_details: z17.object({ reasoning_tokens: z17.number().nullish() }).nullish()
3294
+ }).nullish(),
3295
+ service_tier: z17.string().nullish()
3296
+ })
3297
+ }),
3270
3298
  z17.object({
3271
3299
  type: z17.literal("response.created"),
3272
3300
  response: z17.object({
@@ -4084,6 +4112,10 @@ var openaiResponsesReasoningModelIds = [
4084
4112
  "gpt-5.3-codex",
4085
4113
  "gpt-5.4",
4086
4114
  "gpt-5.4-2026-03-05",
4115
+ "gpt-5.4-mini",
4116
+ "gpt-5.4-mini-2026-03-17",
4117
+ "gpt-5.4-nano",
4118
+ "gpt-5.4-nano-2026-03-17",
4087
4119
  "gpt-5.4-pro",
4088
4120
  "gpt-5.4-pro-2026-03-05"
4089
4121
  ];
@@ -5686,7 +5718,7 @@ var OpenAIResponsesLanguageModel = class {
5686
5718
  controller.enqueue({ type: "stream-start", warnings });
5687
5719
  },
5688
5720
  transform(chunk, controller) {
5689
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J;
5721
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J, _K, _L;
5690
5722
  if (options.includeRawChunks) {
5691
5723
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
5692
5724
  }
@@ -6411,13 +6443,23 @@ var OpenAIResponsesLanguageModel = class {
6411
6443
  if (typeof value.response.service_tier === "string") {
6412
6444
  serviceTier = value.response.service_tier;
6413
6445
  }
6446
+ } else if (isResponseFailedChunk(value)) {
6447
+ const incompleteReason = (_y = value.response.incomplete_details) == null ? void 0 : _y.reason;
6448
+ finishReason = {
6449
+ unified: incompleteReason ? mapOpenAIResponseFinishReason({
6450
+ finishReason: incompleteReason,
6451
+ hasFunctionCall
6452
+ }) : "error",
6453
+ raw: incompleteReason != null ? incompleteReason : "error"
6454
+ };
6455
+ usage = (_z = value.response.usage) != null ? _z : void 0;
6414
6456
  } else if (isResponseAnnotationAddedChunk(value)) {
6415
6457
  ongoingAnnotations.push(value.annotation);
6416
6458
  if (value.annotation.type === "url_citation") {
6417
6459
  controller.enqueue({
6418
6460
  type: "source",
6419
6461
  sourceType: "url",
6420
- id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : generateId2(),
6462
+ id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : generateId2(),
6421
6463
  url: value.annotation.url,
6422
6464
  title: value.annotation.title
6423
6465
  });
@@ -6425,7 +6467,7 @@ var OpenAIResponsesLanguageModel = class {
6425
6467
  controller.enqueue({
6426
6468
  type: "source",
6427
6469
  sourceType: "document",
6428
- id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : generateId2(),
6470
+ id: (_F = (_E = (_D = self.config).generateId) == null ? void 0 : _E.call(_D)) != null ? _F : generateId2(),
6429
6471
  mediaType: "text/plain",
6430
6472
  title: value.annotation.filename,
6431
6473
  filename: value.annotation.filename,
@@ -6441,7 +6483,7 @@ var OpenAIResponsesLanguageModel = class {
6441
6483
  controller.enqueue({
6442
6484
  type: "source",
6443
6485
  sourceType: "document",
6444
- id: (_G = (_F = (_E = self.config).generateId) == null ? void 0 : _F.call(_E)) != null ? _G : generateId2(),
6486
+ id: (_I = (_H = (_G = self.config).generateId) == null ? void 0 : _H.call(_G)) != null ? _I : generateId2(),
6445
6487
  mediaType: "text/plain",
6446
6488
  title: value.annotation.filename,
6447
6489
  filename: value.annotation.filename,
@@ -6457,7 +6499,7 @@ var OpenAIResponsesLanguageModel = class {
6457
6499
  controller.enqueue({
6458
6500
  type: "source",
6459
6501
  sourceType: "document",
6460
- id: (_J = (_I = (_H = self.config).generateId) == null ? void 0 : _I.call(_H)) != null ? _J : generateId2(),
6502
+ id: (_L = (_K = (_J = self.config).generateId) == null ? void 0 : _K.call(_J)) != null ? _L : generateId2(),
6461
6503
  mediaType: "application/octet-stream",
6462
6504
  title: value.annotation.file_id,
6463
6505
  filename: value.annotation.file_id,
@@ -6505,6 +6547,9 @@ function isResponseOutputItemDoneChunk(chunk) {
6505
6547
  function isResponseFinishedChunk(chunk) {
6506
6548
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
6507
6549
  }
6550
+ function isResponseFailedChunk(chunk) {
6551
+ return chunk.type === "response.failed";
6552
+ }
6508
6553
  function isResponseCreatedChunk(chunk) {
6509
6554
  return chunk.type === "response.created";
6510
6555
  }