@ai-sdk/openai 3.0.0-beta.88 → 3.0.0-beta.89

package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @ai-sdk/openai

+ ## 3.0.0-beta.89
+
+ ### Patch Changes
+
+ - 3bd2689: feat: extended token usage
+ - Updated dependencies [3bd2689]
+   - @ai-sdk/provider@3.0.0-beta.26
+   - @ai-sdk/provider-utils@4.0.0-beta.45
+
  ## 3.0.0-beta.88

  ### Patch Changes
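The changelog entry is terse, so a note on what "extended token usage" means here: the flat usage fields (`inputTokens`, `outputTokens`, `totalTokens`, `reasoningTokens`, `cachedInputTokens`) are replaced by nested `inputTokens`/`outputTokens` objects plus a `raw` passthrough of the provider payload. A minimal sketch of the new shape, inferred from the converter functions added in the diff below (the interface name is illustrative, not a package export):

```ts
// Illustrative only: the field layout mirrors the converters added in this
// release; the name ExtendedTokenUsage is an assumption, not part of the package.
interface ExtendedTokenUsage {
  inputTokens: {
    total: number | undefined;      // e.g. prompt_tokens
    noCache: number | undefined;    // total minus cacheRead
    cacheRead: number | undefined;  // e.g. prompt_tokens_details.cached_tokens
    cacheWrite: number | undefined; // never populated by these OpenAI converters
  };
  outputTokens: {
    total: number | undefined;     // e.g. completion_tokens
    text: number | undefined;      // total minus reasoning
    reasoning: number | undefined; // e.g. completion_tokens_details.reasoning_tokens
  };
  raw: unknown; // the provider's original usage object, passed through verbatim
}
```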
package/dist/index.js CHANGED
@@ -68,6 +68,45 @@ function getOpenAILanguageModelCapabilities(modelId) {
  };
  }

+ // src/chat/convert-openai-chat-usage.ts
+ function convertOpenAIChatUsage(usage) {
+   var _a, _b, _c, _d, _e, _f;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+   const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+   const cachedTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
+   const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
+   return {
+     inputTokens: {
+       total: promptTokens,
+       noCache: promptTokens - cachedTokens,
+       cacheRead: cachedTokens,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: completionTokens,
+       text: completionTokens - reasoningTokens,
+       reasoning: reasoningTokens
+     },
+     raw: usage
+   };
+ }
+
  // src/chat/convert-to-openai-chat-messages.ts
  var import_provider = require("@ai-sdk/provider");
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
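To make the arithmetic concrete, here is what `convertOpenAIChatUsage` returns for a representative chat completions usage payload (numbers invented for illustration):

```ts
// Illustrative input: an OpenAI chat completions usage object.
const usage = {
  prompt_tokens: 1200,
  completion_tokens: 300,
  total_tokens: 1500,
  prompt_tokens_details: { cached_tokens: 400 },
  completion_tokens_details: { reasoning_tokens: 120 },
};

// convertOpenAIChatUsage(usage) then returns:
// {
//   inputTokens:  { total: 1200, noCache: 800 /* 1200 - 400 */, cacheRead: 400, cacheWrite: undefined },
//   outputTokens: { total: 300,  text: 180   /* 300 - 120 */,   reasoning: 120 },
//   raw: usage
// }
```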
@@ -774,7 +813,7 @@ var OpenAIChatLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+ var _a, _b, _c, _d, _e, _f;
  const { args: body, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -832,13 +871,7 @@ var OpenAIChatLanguageModel = class {
  return {
  content,
  finishReason: mapOpenAIFinishReason(choice.finish_reason),
- usage: {
- inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
- outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
- totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
- reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
- cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
- },
+ usage: convertOpenAIChatUsage(response.usage),
  request: { body },
  response: {
  ...getResponseMetadata(response),
@@ -874,11 +907,7 @@ var OpenAIChatLanguageModel = class {
  });
  const toolCalls = [];
  let finishReason = "unknown";
- const usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ let usage = void 0;
  let metadataExtracted = false;
  let isActiveText = false;
  const providerMetadata = { openai: {} };
@@ -889,7 +918,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -915,23 +944,19 @@ var OpenAIChatLanguageModel = class {
  }
  }
  if (value.usage != null) {
- usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
- usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
- usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
- usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
- usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
- if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
- providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
+ usage = value.usage;
+ if (((_a = value.usage.completion_tokens_details) == null ? void 0 : _a.accepted_prediction_tokens) != null) {
+ providerMetadata.openai.acceptedPredictionTokens = (_b = value.usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens;
  }
- if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
- providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
+ if (((_c = value.usage.completion_tokens_details) == null ? void 0 : _c.rejected_prediction_tokens) != null) {
+ providerMetadata.openai.rejectedPredictionTokens = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens;
  }
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
  finishReason = mapOpenAIFinishReason(choice.finish_reason);
  }
- if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+ if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
  providerMetadata.openai.logprobs = choice.logprobs.content;
  }
  if ((choice == null ? void 0 : choice.delta) == null) {
@@ -965,7 +990,7 @@ var OpenAIChatLanguageModel = class {
  message: `Expected 'id' to be a string.`
  });
  }
- if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
+ if (((_f = toolCallDelta.function) == null ? void 0 : _f.name) == null) {
  throw new import_provider3.InvalidResponseDataError({
  data: toolCallDelta,
  message: `Expected 'function.name' to be a string.`
@@ -981,12 +1006,12 @@ var OpenAIChatLanguageModel = class {
  type: "function",
  function: {
  name: toolCallDelta.function.name,
- arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
+ arguments: (_g = toolCallDelta.function.arguments) != null ? _g : ""
  },
  hasFinished: false
  };
  const toolCall2 = toolCalls[index];
- if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
+ if (((_h = toolCall2.function) == null ? void 0 : _h.name) != null && ((_i = toolCall2.function) == null ? void 0 : _i.arguments) != null) {
  if (toolCall2.function.arguments.length > 0) {
  controller.enqueue({
  type: "tool-input-delta",
@@ -1001,7 +1026,7 @@ var OpenAIChatLanguageModel = class {
  });
  controller.enqueue({
  type: "tool-call",
- toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
+ toolCallId: (_j = toolCall2.id) != null ? _j : (0, import_provider_utils5.generateId)(),
  toolName: toolCall2.function.name,
  input: toolCall2.function.arguments
  });
@@ -1014,22 +1039,22 @@ var OpenAIChatLanguageModel = class {
  if (toolCall.hasFinished) {
  continue;
  }
- if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
- toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
+ if (((_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null) {
+ toolCall.function.arguments += (_m = (_l = toolCallDelta.function) == null ? void 0 : _l.arguments) != null ? _m : "";
  }
  controller.enqueue({
  type: "tool-input-delta",
  id: toolCall.id,
- delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+ delta: (_n = toolCallDelta.function.arguments) != null ? _n : ""
  });
- if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
+ if (((_o = toolCall.function) == null ? void 0 : _o.name) != null && ((_p = toolCall.function) == null ? void 0 : _p.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
  controller.enqueue({
  type: "tool-input-end",
  id: toolCall.id
  });
  controller.enqueue({
  type: "tool-call",
- toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
+ toolCallId: (_q = toolCall.id) != null ? _q : (0, import_provider_utils5.generateId)(),
  toolName: toolCall.function.name,
  input: toolCall.function.arguments
  });
@@ -1056,7 +1081,7 @@ var OpenAIChatLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage,
+ usage: convertOpenAIChatUsage(usage),
  ...providerMetadata != null ? { providerMetadata } : {}
  });
  }
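The streaming path follows the same design in the hunks above: instead of mutating a partial usage object field by field on every chunk, the stream now stashes the last raw `usage` payload and converts it exactly once at flush time, sharing the conversion logic with the non-streaming path. Condensed into a sketch (not a verbatim excerpt of the diff):

```ts
// Condensed sketch of the new streaming usage handling.
let usage: any = undefined;

// In transform(): the latest usage chunk wins; earlier partials are discarded.
function onUsageChunk(value: { usage?: any }) {
  if (value.usage != null) usage = value.usage;
}

// In flush(): a single conversion. convertOpenAIChatUsage(undefined) returns
// the all-undefined shape, so a stream without a usage chunk needs no special case.
function onFlush(enqueue: (part: unknown) => void, finishReason: string) {
  enqueue({ type: "finish", finishReason, usage: convertOpenAIChatUsage(usage) });
}
```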
@@ -1071,6 +1096,43 @@ var OpenAIChatLanguageModel = class {
  // src/completion/openai-completion-language-model.ts
  var import_provider_utils8 = require("@ai-sdk/provider-utils");

+ // src/completion/convert-openai-completion-usage.ts
+ function convertOpenAICompletionUsage(usage) {
+   var _a, _b, _c, _d;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+   const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+   return {
+     inputTokens: {
+       total: (_c = usage.prompt_tokens) != null ? _c : void 0,
+       noCache: promptTokens,
+       cacheRead: void 0,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: (_d = usage.completion_tokens) != null ? _d : void 0,
+       text: completionTokens,
+       reasoning: void 0
+     },
+     raw: usage
+   };
+ }
+
  // src/completion/convert-to-openai-completion-prompt.ts
  var import_provider4 = require("@ai-sdk/provider");
  function convertToOpenAICompletionPrompt({
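Note the asymmetry with the chat converter: the legacy completions API reports no cached or reasoning token details, so `noCache` simply mirrors the prompt total, `text` mirrors the completion total, and `cacheRead`/`reasoning` stay undefined. For example (invented numbers):

```ts
// Illustrative completions usage payload.
const usage = { prompt_tokens: 50, completion_tokens: 20, total_tokens: 70 };

// convertOpenAICompletionUsage(usage) then returns:
// {
//   inputTokens:  { total: 50, noCache: 50, cacheRead: undefined, cacheWrite: undefined },
//   outputTokens: { total: 20, text: 20, reasoning: undefined },
//   raw: usage
// }
```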
@@ -1370,7 +1432,6 @@ var OpenAICompletionLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c;
  const { args, warnings } = await this.getArgs(options);
  const {
  responseHeaders,
@@ -1397,11 +1458,7 @@ var OpenAICompletionLanguageModel = class {
  }
  return {
  content: [{ type: "text", text: choice.text }],
- usage: {
- inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
- outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
- totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
- },
+ usage: convertOpenAICompletionUsage(response.usage),
  finishReason: mapOpenAIFinishReason2(choice.finish_reason),
  request: { body: args },
  response: {
@@ -1438,11 +1495,7 @@ var OpenAICompletionLanguageModel = class {
  });
  let finishReason = "unknown";
  const providerMetadata = { openai: {} };
- const usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ let usage = void 0;
  let isFirstChunk = true;
  return {
  stream: response.pipeThrough(
@@ -1474,9 +1527,7 @@ var OpenAICompletionLanguageModel = class {
  controller.enqueue({ type: "text-start", id: "0" });
  }
  if (value.usage != null) {
- usage.inputTokens = value.usage.prompt_tokens;
- usage.outputTokens = value.usage.completion_tokens;
- usage.totalTokens = value.usage.total_tokens;
+ usage = value.usage;
  }
  const choice = value.choices[0];
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1501,7 +1552,7 @@ var OpenAICompletionLanguageModel = class {
  type: "finish",
  finishReason,
  providerMetadata,
- usage
+ usage: convertOpenAICompletionUsage(usage)
  });
  }
  })
@@ -2270,6 +2321,45 @@ var openaiTools = {
  var import_provider8 = require("@ai-sdk/provider");
  var import_provider_utils27 = require("@ai-sdk/provider-utils");

+ // src/responses/convert-openai-responses-usage.ts
+ function convertOpenAIResponsesUsage(usage) {
+   var _a, _b, _c, _d;
+   if (usage == null) {
+     return {
+       inputTokens: {
+         total: void 0,
+         noCache: void 0,
+         cacheRead: void 0,
+         cacheWrite: void 0
+       },
+       outputTokens: {
+         total: void 0,
+         text: void 0,
+         reasoning: void 0
+       },
+       raw: void 0
+     };
+   }
+   const inputTokens = usage.input_tokens;
+   const outputTokens = usage.output_tokens;
+   const cachedTokens = (_b = (_a = usage.input_tokens_details) == null ? void 0 : _a.cached_tokens) != null ? _b : 0;
+   const reasoningTokens = (_d = (_c = usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : 0;
+   return {
+     inputTokens: {
+       total: inputTokens,
+       noCache: inputTokens - cachedTokens,
+       cacheRead: cachedTokens,
+       cacheWrite: void 0
+     },
+     outputTokens: {
+       total: outputTokens,
+       text: outputTokens - reasoningTokens,
+       reasoning: reasoningTokens
+     },
+     raw: usage
+   };
+ }
+
  // src/responses/convert-to-openai-responses-input.ts
  var import_provider6 = require("@ai-sdk/provider");
  var import_provider_utils23 = require("@ai-sdk/provider-utils");
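The Responses converter applies the same subtraction arithmetic as the chat converter but reads the Responses-flavored field names (`input_tokens`/`output_tokens` and their `*_tokens_details` objects). A worked example, again with invented numbers:

```ts
// Illustrative Responses API usage payload.
const usage = {
  input_tokens: 2000,
  output_tokens: 500,
  input_tokens_details: { cached_tokens: 1500 },
  output_tokens_details: { reasoning_tokens: 350 },
};

// convertOpenAIResponsesUsage(usage) then returns:
// {
//   inputTokens:  { total: 2000, noCache: 500 /* 2000 - 1500 */, cacheRead: 1500, cacheWrite: undefined },
//   outputTokens: { total: 500,  text: 150   /* 500 - 350 */,    reasoning: 350 },
//   raw: usage
// }
```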
@@ -3929,7 +4019,7 @@ var OpenAIResponsesLanguageModel = class {
  };
  }
  async doGenerate(options) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
  const {
  args: body,
  warnings,
@@ -4330,13 +4420,7 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
  hasFunctionCall
  }),
- usage: {
- inputTokens: usage.input_tokens,
- outputTokens: usage.output_tokens,
- totalTokens: usage.input_tokens + usage.output_tokens,
- reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
- cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
- },
+ usage: convertOpenAIResponsesUsage(usage),
  request: { body },
  response: {
  id: response.id,
@@ -4377,11 +4461,7 @@ var OpenAIResponsesLanguageModel = class {
  const self = this;
  const providerKey = this.config.provider.replace(".responses", "");
  let finishReason = "unknown";
- const usage = {
- inputTokens: void 0,
- outputTokens: void 0,
- totalTokens: void 0
- };
+ let usage = void 0;
  const logprobs = [];
  let responseId = null;
  const ongoingToolCalls = {};
@@ -4396,7 +4476,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({ type: "stream-start", warnings });
  },
  transform(chunk, controller) {
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E;
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
  if (options.includeRawChunks) {
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
  }
@@ -4904,11 +4984,7 @@ var OpenAIResponsesLanguageModel = class {
  finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
  hasFunctionCall
  });
- usage.inputTokens = value.response.usage.input_tokens;
- usage.outputTokens = value.response.usage.output_tokens;
- usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
- usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
- usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
+ usage = value.response.usage;
  if (typeof value.response.service_tier === "string") {
  serviceTier = value.response.service_tier;
  }
@@ -4918,7 +4994,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "url",
- id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils27.generateId)(),
+ id: (_l = (_k = (_j = self.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : (0, import_provider_utils27.generateId)(),
  url: value.annotation.url,
  title: value.annotation.title
  });
@@ -4926,10 +5002,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : (0, import_provider_utils27.generateId)(),
+ id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : (0, import_provider_utils27.generateId)(),
  mediaType: "text/plain",
- title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
- filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
+ title: (_q = (_p = value.annotation.quote) != null ? _p : value.annotation.filename) != null ? _q : "Document",
+ filename: (_r = value.annotation.filename) != null ? _r : value.annotation.file_id,
  ...value.annotation.file_id ? {
  providerMetadata: {
  [providerKey]: {
@@ -4942,10 +5018,10 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_y = (_x = (_w = self.config).generateId) == null ? void 0 : _x.call(_w)) != null ? _y : (0, import_provider_utils27.generateId)(),
+ id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : (0, import_provider_utils27.generateId)(),
  mediaType: "text/plain",
- title: (_A = (_z = value.annotation.filename) != null ? _z : value.annotation.file_id) != null ? _A : "Document",
- filename: (_B = value.annotation.filename) != null ? _B : value.annotation.file_id,
+ title: (_w = (_v = value.annotation.filename) != null ? _v : value.annotation.file_id) != null ? _w : "Document",
+ filename: (_x = value.annotation.filename) != null ? _x : value.annotation.file_id,
  providerMetadata: {
  [providerKey]: {
  fileId: value.annotation.file_id,
@@ -4958,7 +5034,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "source",
  sourceType: "document",
- id: (_E = (_D = (_C = self.config).generateId) == null ? void 0 : _D.call(_C)) != null ? _E : (0, import_provider_utils27.generateId)(),
+ id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : (0, import_provider_utils27.generateId)(),
  mediaType: "application/octet-stream",
  title: value.annotation.file_id,
  filename: value.annotation.file_id,
@@ -4989,7 +5065,7 @@ var OpenAIResponsesLanguageModel = class {
  controller.enqueue({
  type: "finish",
  finishReason,
- usage,
+ usage: convertOpenAIResponsesUsage(usage),
  providerMetadata
  });
  }
@@ -5404,7 +5480,7 @@ var OpenAITranscriptionModel = class {
  };

  // src/version.ts
- var VERSION = true ? "3.0.0-beta.88" : "0.0.0-test";
+ var VERSION = true ? "3.0.0-beta.89" : "0.0.0-test";

  // src/openai-provider.ts
  function createOpenAI(options = {}) {