@ai-sdk/openai 3.0.0-beta.108 → 3.0.0-beta.109

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
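Summary of the change: the provider modules stop reporting finishReason as a bare string and instead return an object with a unified field (the SDK-normalized reason, whose fallback value is renamed from "unknown" to "other") and a raw field carrying the provider's literal finish_reason, or undefined when the provider sent none. A minimal sketch of what a caller of doGenerate would see under the new shape; the model and options variables are illustrative placeholders, not part of this diff:

// Hypothetical consumer code, assuming `model` is an @ai-sdk/openai
// language model instance and `options` are valid doGenerate options.
const { finishReason } = await model.doGenerate(options);

// Before beta.109: finishReason was a string, defaulting to "unknown".
// After: an object pairing the unified reason with the raw provider value.
console.log(finishReason.unified); // e.g. "stop", "tool-calls", or "other"
console.log(finishReason.raw);     // e.g. "stop", "tool_calls", or undefined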
@@ -296,7 +296,7 @@ function mapOpenAIFinishReason(finishReason) {
  case "tool_calls":
  return "tool-calls";
  default:
- return "unknown";
+ return "other";
  }
  }
 
@@ -820,7 +820,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f;
+ var _a, _b, _c, _d, _e, _f, _g;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -877,7 +877,10 @@ var OpenAIChatLanguageModel = class {
  }
  return {
  content,
- finishReason: mapOpenAIFinishReason(choice.finish_reason),
+ finishReason: {
+ unified: mapOpenAIFinishReason(choice.finish_reason),
+ raw: (_g = choice.finish_reason) != null ? _g : void 0
+ },
  usage: convertOpenAIChatUsage(response.usage),
  request: { body },
  response: {
@@ -913,7 +916,10 @@ var OpenAIChatLanguageModel = class {
  fetch: this.config.fetch
  });
  const toolCalls = [];
- let finishReason = "unknown";
+ let finishReason = {
+ unified: "other",
+ raw: void 0
+ };
  let usage = void 0;
  let metadataExtracted = false;
  let isActiveText = false;
@@ -930,13 +936,13 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
  if (!chunk.success) {
- finishReason = "error";
+ finishReason = { unified: "error", raw: void 0 };
  controller.enqueue({ type: "error", error: chunk.error });
  return;
  }
  const value = chunk.value;
  if ("error" in value) {
- finishReason = "error";
+ finishReason = { unified: "error", raw: void 0 };
  controller.enqueue({ type: "error", error: value.error });
  return;
  }
@@ -961,7 +967,10 @@ var OpenAIChatLanguageModel = class {
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
- finishReason = mapOpenAIFinishReason(choice.finish_reason);
+ finishReason = {
+ unified: mapOpenAIFinishReason(choice.finish_reason),
+ raw: choice.finish_reason
+ };
  }
  if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
  providerMetadata.openai.logprobs = choice.logprobs.content;
@@ -1250,7 +1259,7 @@ function mapOpenAIFinishReason2(finishReason) {
  case "tool_calls":
  return "tool-calls";
  default:
- return "unknown";
+ return "other";
  }
  }
 
@@ -1448,6 +1457,7 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
+ var _a;
  const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -1475,7 +1485,10 @@ var OpenAICompletionLanguageModel = class {
  return {
  content: [{ type: "text", text: choice.text }],
  usage: convertOpenAICompletionUsage(response.usage),
- finishReason: mapOpenAIFinishReason2(choice.finish_reason),
+ finishReason: {
+ unified: mapOpenAIFinishReason2(choice.finish_reason),
+ raw: (_a = choice.finish_reason) != null ? _a : void 0
+ },
  request: { body: args },
  response: {
  ...getResponseMetadata2(response),
@@ -1509,7 +1522,10 @@ var OpenAICompletionLanguageModel = class {
  abortSignal: options.abortSignal,
  fetch: this.config.fetch
  });
- let finishReason = "unknown";
+ let finishReason = {
+ unified: "other",
+ raw: void 0
+ };
  const providerMetadata = { openai: {} };
  let usage = void 0;
  let isFirstChunk = true;
@@ -1524,13 +1540,13 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
  if (!chunk.success) {
- finishReason = "error";
+ finishReason = { unified: "error", raw: void 0 };
  controller.enqueue({ type: "error", error: chunk.error });
  return;
  }
  const value = chunk.value;
  if ("error" in value) {
- finishReason = "error";
+ finishReason = { unified: "error", raw: void 0 };
  controller.enqueue({ type: "error", error: value.error });
  return;
  }
@@ -1547,7 +1563,10 @@ var OpenAICompletionLanguageModel = class {
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
- finishReason = mapOpenAIFinishReason2(choice.finish_reason);
+ finishReason = {
+ unified: mapOpenAIFinishReason2(choice.finish_reason),
+ raw: choice.finish_reason
+ };
  }
  if ((choice == null ? void 0 : choice.logprobs) != null) {
  providerMetadata.openai.logprobs = choice.logprobs;
@@ -2818,7 +2837,7 @@ function mapOpenAIResponseFinishReason({
  case "content_filter":
  return "content-filter";
  default:
- return hasFunctionCall ? "tool-calls" : "unknown";
+ return hasFunctionCall ? "tool-calls" : "other";
  }
  }
 
@@ -4476,7 +4495,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z;
  const {
  args: body,
  warnings,
@@ -4873,10 +4892,13 @@ var OpenAIResponsesLanguageModel = class {
  const usage = response.usage;
  return {
  content,
- finishReason: mapOpenAIResponseFinishReason({
- finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
- hasFunctionCall
- }),
+ finishReason: {
+ unified: mapOpenAIResponseFinishReason({
+ finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
+ hasFunctionCall
+ }),
+ raw: (_z = (_y = response.incomplete_details) == null ? void 0 : _y.reason) != null ? _z : void 0
+ },
  usage: convertOpenAIResponsesUsage(usage),
  request: { body },
  response: {
@@ -4917,7 +4939,10 @@ var OpenAIResponsesLanguageModel = class {
  });
  const self = this;
  const providerKey = this.config.provider.replace(".responses", "");
- let finishReason = "unknown";
+ let finishReason = {
+ unified: "other",
+ raw: void 0
+ };
  let usage = void 0;
  const logprobs = [];
  let responseId = null;
@@ -4933,12 +4958,12 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
  if (!chunk.success) {
- finishReason = "error";
+ finishReason = { unified: "error", raw: void 0 };
  controller.enqueue({ type: "error", error: chunk.error });
  return;
  }
@@ -5437,10 +5462,13 @@ var OpenAIResponsesLanguageModel = class {
  activeReasoning[value.item_id].summaryParts[value.summary_index] = "can-conclude";
  }
  } else if (isResponseFinishedChunk(value)) {
- finishReason = mapOpenAIResponseFinishReason({
- finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
- hasFunctionCall
- });
+ finishReason = {
+ unified: mapOpenAIResponseFinishReason({
+ finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
+ hasFunctionCall
+ }),
+ raw: (_k = (_j = value.response.incomplete_details) == null ? void 0 : _j.reason) != null ? _k : void 0
+ };
  usage = value.response.usage;
  if (typeof value.response.service_tier === "string") {
  serviceTier = value.response.service_tier;
@@ -5451,7 +5479,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_l = (_k = (_j = self.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : generateId2(),
+ id: (_n = (_m = (_l = self.config).generateId) == null ? void 0 : _m.call(_l)) != null ? _n : generateId2(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -5459,10 +5487,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : generateId2(),
+ id: (_q = (_p = (_o = self.config).generateId) == null ? void 0 : _p.call(_o)) != null ? _q : generateId2(),
  mediaType: "text/plain",
- title: (_q = (_p = value.annotation.quote) != null ? _p : value.annotation.filename) != null ? _q : "Document",
- filename: (_r = value.annotation.filename) != null ? _r : value.annotation.file_id,
+ title: (_s = (_r = value.annotation.quote) != null ? _r : value.annotation.filename) != null ? _s : "Document",
+ filename: (_t = value.annotation.filename) != null ? _t : value.annotation.file_id,
  ...value.annotation.file_id ? {
  providerMetadata: {
  [providerKey]: {
@@ -5475,10 +5503,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : generateId2(),
+ id: (_w = (_v = (_u = self.config).generateId) == null ? void 0 : _v.call(_u)) != null ? _w : generateId2(),
  mediaType: "text/plain",
- title: (_w = (_v = value.annotation.filename) != null ? _v : value.annotation.file_id) != null ? _w : "Document",
- filename: (_x = value.annotation.filename) != null ? _x : value.annotation.file_id,
+ title: (_y = (_x = value.annotation.filename) != null ? _x : value.annotation.file_id) != null ? _y : "Document",
+ filename: (_z = value.annotation.filename) != null ? _z : value.annotation.file_id,
  providerMetadata: {
  [providerKey]: {
  fileId: value.annotation.file_id,
@@ -5491,7 +5519,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : generateId2(),
+ id: (_C = (_B = (_A = self.config).generateId) == null ? void 0 : _B.call(_A)) != null ? _C : generateId2(),
  mediaType: "application/octet-stream",
  title: value.annotation.file_id,
  filename: value.annotation.file_id,