@ai-sdk/openai 3.0.47 → 3.0.48

This diff shows the changes between publicly released versions of this package as they appear in the supported public registries. It is provided for informational purposes only.
package/dist/index.mjs CHANGED
@@ -42,7 +42,7 @@ var openaiFailedResponseHandler = createJsonErrorResponseHandler({
42
42
  // src/openai-language-model-capabilities.ts
43
43
  function getOpenAILanguageModelCapabilities(modelId) {
44
44
  const supportsFlexProcessing = modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
45
- const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
45
+ const supportsPriorityProcessing = modelId.startsWith("gpt-4") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-nano") && !modelId.startsWith("gpt-5-chat") && !modelId.startsWith("gpt-5.4-nano") || modelId.startsWith("o3") || modelId.startsWith("o4-mini");
46
46
  const isReasoningModel = modelId.startsWith("o1") || modelId.startsWith("o3") || modelId.startsWith("o4-mini") || modelId.startsWith("gpt-5") && !modelId.startsWith("gpt-5-chat");
47
47
  const supportsNonReasoningParameters = modelId.startsWith("gpt-5.1") || modelId.startsWith("gpt-5.2") || modelId.startsWith("gpt-5.3") || modelId.startsWith("gpt-5.4");
48
48
  const systemMessageMode = isReasoningModel ? "developer" : "system";
@@ -3376,6 +3376,23 @@ var openaiResponsesChunkSchema = lazySchema19(
3376
3376
  service_tier: z21.string().nullish()
3377
3377
  })
3378
3378
  }),
3379
+ z21.object({
3380
+ type: z21.literal("response.failed"),
3381
+ response: z21.object({
3382
+ error: z21.object({
3383
+ code: z21.string().nullish(),
3384
+ message: z21.string()
3385
+ }).nullish(),
3386
+ incomplete_details: z21.object({ reason: z21.string() }).nullish(),
3387
+ usage: z21.object({
3388
+ input_tokens: z21.number(),
3389
+ input_tokens_details: z21.object({ cached_tokens: z21.number().nullish() }).nullish(),
3390
+ output_tokens: z21.number(),
3391
+ output_tokens_details: z21.object({ reasoning_tokens: z21.number().nullish() }).nullish()
3392
+ }).nullish(),
3393
+ service_tier: z21.string().nullish()
3394
+ })
3395
+ }),
3379
3396
  z21.object({
3380
3397
  type: z21.literal("response.created"),
3381
3398
  response: z21.object({
@@ -4193,6 +4210,10 @@ var openaiResponsesReasoningModelIds = [
4193
4210
  "gpt-5.3-codex",
4194
4211
  "gpt-5.4",
4195
4212
  "gpt-5.4-2026-03-05",
4213
+ "gpt-5.4-mini",
4214
+ "gpt-5.4-mini-2026-03-17",
4215
+ "gpt-5.4-nano",
4216
+ "gpt-5.4-nano-2026-03-17",
4196
4217
  "gpt-5.4-pro",
4197
4218
  "gpt-5.4-pro-2026-03-05"
4198
4219
  ];
@@ -5448,7 +5469,7 @@ var OpenAIResponsesLanguageModel = class {
5448
5469
  controller.enqueue({ type: "stream-start", warnings });
5449
5470
  },
5450
5471
  transform(chunk, controller) {
5451
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J;
5472
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E, _F, _G, _H, _I, _J, _K, _L;
5452
5473
  if (options.includeRawChunks) {
5453
5474
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
5454
5475
  }
@@ -6173,13 +6194,23 @@ var OpenAIResponsesLanguageModel = class {
6173
6194
  if (typeof value.response.service_tier === "string") {
6174
6195
  serviceTier = value.response.service_tier;
6175
6196
  }
6197
+ } else if (isResponseFailedChunk(value)) {
6198
+ const incompleteReason = (_y = value.response.incomplete_details) == null ? void 0 : _y.reason;
6199
+ finishReason = {
6200
+ unified: incompleteReason ? mapOpenAIResponseFinishReason({
6201
+ finishReason: incompleteReason,
6202
+ hasFunctionCall
6203
+ }) : "error",
6204
+ raw: incompleteReason != null ? incompleteReason : "error"
6205
+ };
6206
+ usage = (_z = value.response.usage) != null ? _z : void 0;
6176
6207
  } else if (isResponseAnnotationAddedChunk(value)) {
6177
6208
  ongoingAnnotations.push(value.annotation);
6178
6209
  if (value.annotation.type === "url_citation") {
6179
6210
  controller.enqueue({
6180
6211
  type: "source",
6181
6212
  sourceType: "url",
6182
- id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : generateId2(),
6213
+ id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : generateId2(),
6183
6214
  url: value.annotation.url,
6184
6215
  title: value.annotation.title
6185
6216
  });
@@ -6187,7 +6218,7 @@ var OpenAIResponsesLanguageModel = class {
6187
6218
  controller.enqueue({
6188
6219
  type: "source",
6189
6220
  sourceType: "document",
6190
- id: (_D = (_C = (_B = self.config).generateId) == null ? void 0 : _C.call(_B)) != null ? _D : generateId2(),
6221
+ id: (_F = (_E = (_D = self.config).generateId) == null ? void 0 : _E.call(_D)) != null ? _F : generateId2(),
6191
6222
  mediaType: "text/plain",
6192
6223
  title: value.annotation.filename,
6193
6224
  filename: value.annotation.filename,
@@ -6203,7 +6234,7 @@ var OpenAIResponsesLanguageModel = class {
6203
6234
  controller.enqueue({
6204
6235
  type: "source",
6205
6236
  sourceType: "document",
6206
- id: (_G = (_F = (_E = self.config).generateId) == null ? void 0 : _F.call(_E)) != null ? _G : generateId2(),
6237
+ id: (_I = (_H = (_G = self.config).generateId) == null ? void 0 : _H.call(_G)) != null ? _I : generateId2(),
6207
6238
  mediaType: "text/plain",
6208
6239
  title: value.annotation.filename,
6209
6240
  filename: value.annotation.filename,
@@ -6219,7 +6250,7 @@ var OpenAIResponsesLanguageModel = class {
6219
6250
  controller.enqueue({
6220
6251
  type: "source",
6221
6252
  sourceType: "document",
6222
- id: (_J = (_I = (_H = self.config).generateId) == null ? void 0 : _I.call(_H)) != null ? _J : generateId2(),
6253
+ id: (_L = (_K = (_J = self.config).generateId) == null ? void 0 : _K.call(_J)) != null ? _L : generateId2(),
6223
6254
  mediaType: "application/octet-stream",
6224
6255
  title: value.annotation.file_id,
6225
6256
  filename: value.annotation.file_id,
@@ -6267,6 +6298,9 @@ function isResponseOutputItemDoneChunk(chunk) {
6267
6298
  function isResponseFinishedChunk(chunk) {
6268
6299
  return chunk.type === "response.completed" || chunk.type === "response.incomplete";
6269
6300
  }
6301
+ function isResponseFailedChunk(chunk) {
6302
+ return chunk.type === "response.failed";
6303
+ }
6270
6304
  function isResponseCreatedChunk(chunk) {
6271
6305
  return chunk.type === "response.created";
6272
6306
  }
@@ -6693,7 +6727,7 @@ var OpenAITranscriptionModel = class {
6693
6727
  };
6694
6728
 
6695
6729
  // src/version.ts
6696
- var VERSION = true ? "3.0.47" : "0.0.0-test";
6730
+ var VERSION = true ? "3.0.48" : "0.0.0-test";
6697
6731
 
6698
6732
  // src/openai-provider.ts
6699
6733
  function createOpenAI(options = {}) {