@ai-sdk/openai 3.0.0-beta.88 → 3.0.0-beta.90

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -95,6 +95,45 @@ function getOpenAILanguageModelCapabilities(modelId) {
   };
 }
 
+// src/chat/convert-openai-chat-usage.ts
+function convertOpenAIChatUsage(usage) {
+  var _a, _b, _c, _d, _e, _f;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  const cachedTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
+  const reasoningTokens = (_f = (_e = usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : 0;
+  return {
+    inputTokens: {
+      total: promptTokens,
+      noCache: promptTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: completionTokens,
+      text: completionTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/chat/convert-to-openai-chat-messages.ts
 var import_provider = require("@ai-sdk/provider");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
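
The converter above centralizes the mapping from OpenAI's Chat Completions usage payload to the SDK's new nested usage shape. A minimal sketch of its behavior, with illustrative payload values (not taken from this diff):

```ts
// Hypothetical Chat Completions usage payload, shaped like the
// `usage` argument that convertOpenAIChatUsage expects.
const usage = {
  prompt_tokens: 120,
  completion_tokens: 80,
  prompt_tokens_details: { cached_tokens: 100 },
  completion_tokens_details: { reasoning_tokens: 30 },
};

// convertOpenAIChatUsage(usage) returns:
// {
//   inputTokens:  { total: 120, noCache: 20, cacheRead: 100, cacheWrite: undefined },
//   outputTokens: { total: 80, text: 50, reasoning: 30 },
//   raw: usage  // the untouched provider payload
// }
// noCache = prompt_tokens - cached_tokens; text = completion_tokens - reasoning_tokens.
```

Passing `undefined` (no usage reported) yields the same nested shape with every leaf `undefined`, so consumers always see a consistent structure.
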
@@ -801,7 +840,7 @@ var OpenAIChatLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+    var _a, _b, _c, _d, _e, _f;
     const { args: body, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -859,13 +898,7 @@ var OpenAIChatLanguageModel = class {
     return {
       content,
       finishReason: mapOpenAIFinishReason(choice.finish_reason),
-      usage: {
-        inputTokens: (_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : void 0,
-        outputTokens: (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : void 0,
-        totalTokens: (_l = (_k = response.usage) == null ? void 0 : _k.total_tokens) != null ? _l : void 0,
-        reasoningTokens: (_m = completionTokenDetails == null ? void 0 : completionTokenDetails.reasoning_tokens) != null ? _m : void 0,
-        cachedInputTokens: (_n = promptTokenDetails == null ? void 0 : promptTokenDetails.cached_tokens) != null ? _n : void 0
-      },
+      usage: convertOpenAIChatUsage(response.usage),
       request: { body },
       response: {
         ...getResponseMetadata(response),
@@ -901,11 +934,7 @@ var OpenAIChatLanguageModel = class {
     });
     const toolCalls = [];
     let finishReason = "unknown";
-    const usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     let metadataExtracted = false;
     let isActiveText = false;
     const providerMetadata = { openai: {} };
@@ -916,7 +945,7 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -942,23 +971,19 @@ var OpenAIChatLanguageModel = class {
             }
           }
           if (value.usage != null) {
-            usage.inputTokens = (_a = value.usage.prompt_tokens) != null ? _a : void 0;
-            usage.outputTokens = (_b = value.usage.completion_tokens) != null ? _b : void 0;
-            usage.totalTokens = (_c = value.usage.total_tokens) != null ? _c : void 0;
-            usage.reasoningTokens = (_e = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.reasoning_tokens) != null ? _e : void 0;
-            usage.cachedInputTokens = (_g = (_f = value.usage.prompt_tokens_details) == null ? void 0 : _f.cached_tokens) != null ? _g : void 0;
-            if (((_h = value.usage.completion_tokens_details) == null ? void 0 : _h.accepted_prediction_tokens) != null) {
-              providerMetadata.openai.acceptedPredictionTokens = (_i = value.usage.completion_tokens_details) == null ? void 0 : _i.accepted_prediction_tokens;
+            usage = value.usage;
+            if (((_a = value.usage.completion_tokens_details) == null ? void 0 : _a.accepted_prediction_tokens) != null) {
+              providerMetadata.openai.acceptedPredictionTokens = (_b = value.usage.completion_tokens_details) == null ? void 0 : _b.accepted_prediction_tokens;
             }
-            if (((_j = value.usage.completion_tokens_details) == null ? void 0 : _j.rejected_prediction_tokens) != null) {
-              providerMetadata.openai.rejectedPredictionTokens = (_k = value.usage.completion_tokens_details) == null ? void 0 : _k.rejected_prediction_tokens;
+            if (((_c = value.usage.completion_tokens_details) == null ? void 0 : _c.rejected_prediction_tokens) != null) {
+              providerMetadata.openai.rejectedPredictionTokens = (_d = value.usage.completion_tokens_details) == null ? void 0 : _d.rejected_prediction_tokens;
             }
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
             finishReason = mapOpenAIFinishReason(choice.finish_reason);
           }
-          if (((_l = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _l.content) != null) {
+          if (((_e = choice == null ? void 0 : choice.logprobs) == null ? void 0 : _e.content) != null) {
             providerMetadata.openai.logprobs = choice.logprobs.content;
           }
           if ((choice == null ? void 0 : choice.delta) == null) {
@@ -992,7 +1017,7 @@ var OpenAIChatLanguageModel = class {
                   message: `Expected 'id' to be a string.`
                 });
               }
-              if (((_m = toolCallDelta.function) == null ? void 0 : _m.name) == null) {
+              if (((_f = toolCallDelta.function) == null ? void 0 : _f.name) == null) {
                 throw new import_provider3.InvalidResponseDataError({
                   data: toolCallDelta,
                   message: `Expected 'function.name' to be a string.`
@@ -1008,12 +1033,12 @@ var OpenAIChatLanguageModel = class {
                 type: "function",
                 function: {
                   name: toolCallDelta.function.name,
-                  arguments: (_n = toolCallDelta.function.arguments) != null ? _n : ""
+                  arguments: (_g = toolCallDelta.function.arguments) != null ? _g : ""
                 },
                 hasFinished: false
               };
               const toolCall2 = toolCalls[index];
-              if (((_o = toolCall2.function) == null ? void 0 : _o.name) != null && ((_p = toolCall2.function) == null ? void 0 : _p.arguments) != null) {
+              if (((_h = toolCall2.function) == null ? void 0 : _h.name) != null && ((_i = toolCall2.function) == null ? void 0 : _i.arguments) != null) {
                 if (toolCall2.function.arguments.length > 0) {
                   controller.enqueue({
                     type: "tool-input-delta",
@@ -1028,7 +1053,7 @@ var OpenAIChatLanguageModel = class {
                   });
                   controller.enqueue({
                     type: "tool-call",
-                    toolCallId: (_q = toolCall2.id) != null ? _q : (0, import_provider_utils5.generateId)(),
+                    toolCallId: (_j = toolCall2.id) != null ? _j : (0, import_provider_utils5.generateId)(),
                     toolName: toolCall2.function.name,
                     input: toolCall2.function.arguments
                   });
@@ -1041,22 +1066,22 @@ var OpenAIChatLanguageModel = class {
               if (toolCall.hasFinished) {
                 continue;
               }
-              if (((_r = toolCallDelta.function) == null ? void 0 : _r.arguments) != null) {
-                toolCall.function.arguments += (_t = (_s = toolCallDelta.function) == null ? void 0 : _s.arguments) != null ? _t : "";
+              if (((_k = toolCallDelta.function) == null ? void 0 : _k.arguments) != null) {
+                toolCall.function.arguments += (_m = (_l = toolCallDelta.function) == null ? void 0 : _l.arguments) != null ? _m : "";
               }
               controller.enqueue({
                 type: "tool-input-delta",
                 id: toolCall.id,
-                delta: (_u = toolCallDelta.function.arguments) != null ? _u : ""
+                delta: (_n = toolCallDelta.function.arguments) != null ? _n : ""
               });
-              if (((_v = toolCall.function) == null ? void 0 : _v.name) != null && ((_w = toolCall.function) == null ? void 0 : _w.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
+              if (((_o = toolCall.function) == null ? void 0 : _o.name) != null && ((_p = toolCall.function) == null ? void 0 : _p.arguments) != null && (0, import_provider_utils5.isParsableJson)(toolCall.function.arguments)) {
                 controller.enqueue({
                   type: "tool-input-end",
                   id: toolCall.id
                 });
                 controller.enqueue({
                   type: "tool-call",
-                  toolCallId: (_x = toolCall.id) != null ? _x : (0, import_provider_utils5.generateId)(),
+                  toolCallId: (_q = toolCall.id) != null ? _q : (0, import_provider_utils5.generateId)(),
                   toolName: toolCall.function.name,
                   input: toolCall.function.arguments
                 });
@@ -1083,7 +1108,7 @@ var OpenAIChatLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            usage,
+            usage: convertOpenAIChatUsage(usage),
             ...providerMetadata != null ? { providerMetadata } : {}
           });
         }
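
Note the streaming change that lands with the converter: instead of incrementally mutating a flat `usage` object, the stream now keeps the last raw usage chunk and converts it once at finish. A simplified standalone sketch of the pattern (not the actual TransformStream plumbing):

```ts
// Simplified sketch: store the raw usage chunk, convert at finish.
let usage; // undefined until a usage chunk arrives

const chunks = [{ usage: { prompt_tokens: 5, completion_tokens: 7 } }]; // example data
for (const value of chunks) {
  if (value.usage != null) {
    usage = value.usage; // keep the raw payload; no field-by-field copying
  }
}

// If no usage chunk ever arrived, convertOpenAIChatUsage(undefined)
// still returns the full nested shape with undefined leaves.
const finishUsage = convertOpenAIChatUsage(usage);
```
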
@@ -1098,6 +1123,43 @@ var OpenAIChatLanguageModel = class {
 // src/completion/openai-completion-language-model.ts
 var import_provider_utils8 = require("@ai-sdk/provider-utils");
 
+// src/completion/convert-openai-completion-usage.ts
+function convertOpenAICompletionUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const promptTokens = (_a = usage.prompt_tokens) != null ? _a : 0;
+  const completionTokens = (_b = usage.completion_tokens) != null ? _b : 0;
+  return {
+    inputTokens: {
+      total: (_c = usage.prompt_tokens) != null ? _c : void 0,
+      noCache: promptTokens,
+      cacheRead: void 0,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: (_d = usage.completion_tokens) != null ? _d : void 0,
+      text: completionTokens,
+      reasoning: void 0
+    },
+    raw: usage
+  };
+}
+
 // src/completion/convert-to-openai-completion-prompt.ts
 var import_provider4 = require("@ai-sdk/provider");
 function convertToOpenAICompletionPrompt({
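
The completion-flavored converter mirrors the chat one, but the legacy Completions usage payload carries no cache or reasoning detail, so `cacheRead` and `reasoning` stay `undefined` and `noCache` simply repeats the prompt total. Illustrative values (not from this diff):

```ts
// Hypothetical Completions usage payload (example values only):
const usage = { prompt_tokens: 10, completion_tokens: 25, total_tokens: 35 };

// convertOpenAICompletionUsage(usage) returns:
// {
//   inputTokens:  { total: 10, noCache: 10, cacheRead: undefined, cacheWrite: undefined },
//   outputTokens: { total: 25, text: 25, reasoning: undefined },
//   raw: usage
// }
```
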
@@ -1397,7 +1459,6 @@ var OpenAICompletionLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c;
     const { args, warnings } = await this.getArgs(options);
     const {
       responseHeaders,
@@ -1424,11 +1485,7 @@ var OpenAICompletionLanguageModel = class {
     }
     return {
       content: [{ type: "text", text: choice.text }],
-      usage: {
-        inputTokens: (_a = response.usage) == null ? void 0 : _a.prompt_tokens,
-        outputTokens: (_b = response.usage) == null ? void 0 : _b.completion_tokens,
-        totalTokens: (_c = response.usage) == null ? void 0 : _c.total_tokens
-      },
+      usage: convertOpenAICompletionUsage(response.usage),
       finishReason: mapOpenAIFinishReason2(choice.finish_reason),
       request: { body: args },
       response: {
@@ -1465,11 +1522,7 @@ var OpenAICompletionLanguageModel = class {
     });
     let finishReason = "unknown";
     const providerMetadata = { openai: {} };
-    const usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     let isFirstChunk = true;
     return {
       stream: response.pipeThrough(
@@ -1501,9 +1554,7 @@ var OpenAICompletionLanguageModel = class {
             controller.enqueue({ type: "text-start", id: "0" });
           }
           if (value.usage != null) {
-            usage.inputTokens = value.usage.prompt_tokens;
-            usage.outputTokens = value.usage.completion_tokens;
-            usage.totalTokens = value.usage.total_tokens;
+            usage = value.usage;
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1528,7 +1579,7 @@ var OpenAICompletionLanguageModel = class {
             type: "finish",
             finishReason,
             providerMetadata,
-            usage
+            usage: convertOpenAICompletionUsage(usage)
           });
         }
       })
@@ -2131,6 +2182,45 @@ var OpenAISpeechModel = class {
 var import_provider8 = require("@ai-sdk/provider");
 var import_provider_utils32 = require("@ai-sdk/provider-utils");
 
+// src/responses/convert-openai-responses-usage.ts
+function convertOpenAIResponsesUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const inputTokens = usage.input_tokens;
+  const outputTokens = usage.output_tokens;
+  const cachedTokens = (_b = (_a = usage.input_tokens_details) == null ? void 0 : _a.cached_tokens) != null ? _b : 0;
+  const reasoningTokens = (_d = (_c = usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : 0;
+  return {
+    inputTokens: {
+      total: inputTokens,
+      noCache: inputTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: outputTokens,
+      text: outputTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/responses/convert-to-openai-responses-input.ts
 var import_provider6 = require("@ai-sdk/provider");
 var import_provider_utils22 = require("@ai-sdk/provider-utils");
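
The Responses API converter reads the differently named `input_tokens`/`output_tokens` fields and their `*_tokens_details` sub-objects, but produces the same nested shape. Illustrative values (not from this diff):

```ts
// Hypothetical Responses API usage payload (example values only):
const usage = {
  input_tokens: 200,
  output_tokens: 150,
  input_tokens_details: { cached_tokens: 64 },
  output_tokens_details: { reasoning_tokens: 90 },
};

// convertOpenAIResponsesUsage(usage) returns:
// {
//   inputTokens:  { total: 200, noCache: 136, cacheRead: 64, cacheWrite: undefined },
//   outputTokens: { total: 150, text: 60, reasoning: 90 },
//   raw: usage
// }
```
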
@@ -4212,7 +4302,7 @@ var OpenAIResponsesLanguageModel = class {
     };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
+    var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x;
     const {
       args: body,
       warnings,
@@ -4613,13 +4703,7 @@ var OpenAIResponsesLanguageModel = class {
         finishReason: (_x = response.incomplete_details) == null ? void 0 : _x.reason,
         hasFunctionCall
       }),
-      usage: {
-        inputTokens: usage.input_tokens,
-        outputTokens: usage.output_tokens,
-        totalTokens: usage.input_tokens + usage.output_tokens,
-        reasoningTokens: (_z = (_y = usage.output_tokens_details) == null ? void 0 : _y.reasoning_tokens) != null ? _z : void 0,
-        cachedInputTokens: (_B = (_A = usage.input_tokens_details) == null ? void 0 : _A.cached_tokens) != null ? _B : void 0
-      },
+      usage: convertOpenAIResponsesUsage(usage),
       request: { body },
       response: {
         id: response.id,
@@ -4660,11 +4744,7 @@ var OpenAIResponsesLanguageModel = class {
     const self = this;
     const providerKey = this.config.provider.replace(".responses", "");
     let finishReason = "unknown";
-    const usage = {
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
-    };
+    let usage = void 0;
     const logprobs = [];
     let responseId = null;
     const ongoingToolCalls = {};
@@ -4679,7 +4759,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({ type: "stream-start", warnings });
         },
         transform(chunk, controller) {
-          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B, _C, _D, _E;
+          var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A;
           if (options.includeRawChunks) {
             controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
           }
@@ -5187,11 +5267,7 @@ var OpenAIResponsesLanguageModel = class {
             finishReason: (_i = value.response.incomplete_details) == null ? void 0 : _i.reason,
             hasFunctionCall
           });
-          usage.inputTokens = value.response.usage.input_tokens;
-          usage.outputTokens = value.response.usage.output_tokens;
-          usage.totalTokens = value.response.usage.input_tokens + value.response.usage.output_tokens;
-          usage.reasoningTokens = (_k = (_j = value.response.usage.output_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0;
-          usage.cachedInputTokens = (_m = (_l = value.response.usage.input_tokens_details) == null ? void 0 : _l.cached_tokens) != null ? _m : void 0;
+          usage = value.response.usage;
           if (typeof value.response.service_tier === "string") {
             serviceTier = value.response.service_tier;
           }
@@ -5201,7 +5277,7 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "url",
-              id: (_p = (_o = (_n = self.config).generateId) == null ? void 0 : _o.call(_n)) != null ? _p : (0, import_provider_utils32.generateId)(),
+              id: (_l = (_k = (_j = self.config).generateId) == null ? void 0 : _k.call(_j)) != null ? _l : (0, import_provider_utils32.generateId)(),
               url: value.annotation.url,
               title: value.annotation.title
             });
@@ -5209,10 +5285,10 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "document",
-              id: (_s = (_r = (_q = self.config).generateId) == null ? void 0 : _r.call(_q)) != null ? _s : (0, import_provider_utils32.generateId)(),
+              id: (_o = (_n = (_m = self.config).generateId) == null ? void 0 : _n.call(_m)) != null ? _o : (0, import_provider_utils32.generateId)(),
               mediaType: "text/plain",
-              title: (_u = (_t = value.annotation.quote) != null ? _t : value.annotation.filename) != null ? _u : "Document",
-              filename: (_v = value.annotation.filename) != null ? _v : value.annotation.file_id,
+              title: (_q = (_p = value.annotation.quote) != null ? _p : value.annotation.filename) != null ? _q : "Document",
+              filename: (_r = value.annotation.filename) != null ? _r : value.annotation.file_id,
               ...value.annotation.file_id ? {
                 providerMetadata: {
                   [providerKey]: {
@@ -5225,10 +5301,10 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "document",
-              id: (_y = (_x = (_w = self.config).generateId) == null ? void 0 : _x.call(_w)) != null ? _y : (0, import_provider_utils32.generateId)(),
+              id: (_u = (_t = (_s = self.config).generateId) == null ? void 0 : _t.call(_s)) != null ? _u : (0, import_provider_utils32.generateId)(),
               mediaType: "text/plain",
-              title: (_A = (_z = value.annotation.filename) != null ? _z : value.annotation.file_id) != null ? _A : "Document",
-              filename: (_B = value.annotation.filename) != null ? _B : value.annotation.file_id,
+              title: (_w = (_v = value.annotation.filename) != null ? _v : value.annotation.file_id) != null ? _w : "Document",
+              filename: (_x = value.annotation.filename) != null ? _x : value.annotation.file_id,
               providerMetadata: {
                 [providerKey]: {
                   fileId: value.annotation.file_id,
@@ -5241,7 +5317,7 @@ var OpenAIResponsesLanguageModel = class {
             controller.enqueue({
               type: "source",
               sourceType: "document",
-              id: (_E = (_D = (_C = self.config).generateId) == null ? void 0 : _D.call(_C)) != null ? _E : (0, import_provider_utils32.generateId)(),
+              id: (_A = (_z = (_y = self.config).generateId) == null ? void 0 : _z.call(_y)) != null ? _A : (0, import_provider_utils32.generateId)(),
               mediaType: "application/octet-stream",
               title: value.annotation.file_id,
               filename: value.annotation.file_id,
@@ -5272,7 +5348,7 @@ var OpenAIResponsesLanguageModel = class {
           controller.enqueue({
             type: "finish",
             finishReason,
-            usage,
+            usage: convertOpenAIResponsesUsage(usage),
             providerMetadata
           });
         }
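
Taken together, the three converters replace the flat `usage` object (`inputTokens`, `outputTokens`, `totalTokens`, `reasoningTokens`, `cachedInputTokens`) with the nested shape on every `doGenerate`/`doStream` result. A hedged consumer-side sketch, assuming an `ai` package beta that matches this provider version and surfaces the shape unchanged:

```ts
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

const { usage } = await generateText({
  model: openai("gpt-4o"), // any chat model; illustrative
  prompt: "Hello!",
});

// Flat fields such as usage.cachedInputTokens are gone; read the
// nested shape produced by the converters in this diff instead:
console.log(usage.inputTokens.total, usage.inputTokens.cacheRead);
console.log(usage.outputTokens.total, usage.outputTokens.reasoning);
console.log(usage.raw); // untouched provider payload
```
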