@openrouter/ai-sdk-provider 2.2.2 → 2.2.3

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. The information it contains is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
@@ -2261,6 +2261,46 @@ var OpenRouterProviderOptionsSchema = z3.object({
2261
2261
  }).optional()
2262
2262
  }).optional();
2263
2263
 
2264
+ // src/utils/compute-token-usage.ts
2265
+ function computeTokenUsage(usage) {
2266
+ var _a16, _b16, _c, _d, _e, _f, _g, _h;
2267
+ const promptTokens = (_a16 = usage.prompt_tokens) != null ? _a16 : 0;
2268
+ const completionTokens = (_b16 = usage.completion_tokens) != null ? _b16 : 0;
2269
+ const cacheReadTokens = (_d = (_c = usage.prompt_tokens_details) == null ? void 0 : _c.cached_tokens) != null ? _d : 0;
2270
+ const cacheWriteTokens = (_f = (_e = usage.prompt_tokens_details) == null ? void 0 : _e.cache_write_tokens) != null ? _f : void 0;
2271
+ const reasoningTokens = (_h = (_g = usage.completion_tokens_details) == null ? void 0 : _g.reasoning_tokens) != null ? _h : 0;
2272
+ return {
2273
+ inputTokens: {
2274
+ total: promptTokens,
2275
+ noCache: promptTokens - cacheReadTokens,
2276
+ cacheRead: cacheReadTokens,
2277
+ cacheWrite: cacheWriteTokens
2278
+ },
2279
+ outputTokens: {
2280
+ total: completionTokens,
2281
+ text: completionTokens - reasoningTokens,
2282
+ reasoning: reasoningTokens
2283
+ },
2284
+ raw: usage
2285
+ };
2286
+ }
2287
+ function emptyUsage() {
2288
+ return {
2289
+ inputTokens: {
2290
+ total: 0,
2291
+ noCache: void 0,
2292
+ cacheRead: void 0,
2293
+ cacheWrite: void 0
2294
+ },
2295
+ outputTokens: {
2296
+ total: 0,
2297
+ text: void 0,
2298
+ reasoning: void 0
2299
+ },
2300
+ raw: void 0
2301
+ };
2302
+ }
2303
+
2264
2304
  // src/utils/map-finish-reason.ts
2265
2305
  function mapToUnified(finishReason) {
2266
2306
  switch (finishReason) {
@@ -2767,7 +2807,8 @@ var OpenRouterChatCompletionBaseResponseSchema = z7.object({
2767
2807
  usage: z7.object({
2768
2808
  prompt_tokens: z7.number(),
2769
2809
  prompt_tokens_details: z7.object({
2770
- cached_tokens: z7.number()
2810
+ cached_tokens: z7.number(),
2811
+ cache_write_tokens: z7.number().nullish()
2771
2812
  }).passthrough().nullish(),
2772
2813
  completion_tokens: z7.number(),
2773
2814
  completion_tokens_details: z7.object({
@@ -3042,7 +3083,7 @@ var OpenRouterChatLanguageModel = class {
3042
3083
  return baseArgs;
3043
3084
  }
3044
3085
  async doGenerate(options) {
3045
- var _a16, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
3086
+ var _a16, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
3046
3087
  const providerOptions = options.providerOptions || {};
3047
3088
  const openrouterOptions = providerOptions.openrouter || {};
3048
3089
  const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
@@ -3081,34 +3122,8 @@ var OpenRouterChatLanguageModel = class {
3081
3122
  message: "No choice in response"
3082
3123
  });
3083
3124
  }
3084
- const usageInfo = response.usage ? {
3085
- inputTokens: {
3086
- total: (_a16 = response.usage.prompt_tokens) != null ? _a16 : 0,
3087
- noCache: void 0,
3088
- cacheRead: (_c = (_b16 = response.usage.prompt_tokens_details) == null ? void 0 : _b16.cached_tokens) != null ? _c : void 0,
3089
- cacheWrite: void 0
3090
- },
3091
- outputTokens: {
3092
- total: (_d = response.usage.completion_tokens) != null ? _d : 0,
3093
- text: void 0,
3094
- reasoning: (_f = (_e = response.usage.completion_tokens_details) == null ? void 0 : _e.reasoning_tokens) != null ? _f : void 0
3095
- },
3096
- raw: response.usage
3097
- } : {
3098
- inputTokens: {
3099
- total: 0,
3100
- noCache: void 0,
3101
- cacheRead: void 0,
3102
- cacheWrite: void 0
3103
- },
3104
- outputTokens: {
3105
- total: 0,
3106
- text: void 0,
3107
- reasoning: void 0
3108
- },
3109
- raw: void 0
3110
- };
3111
- const reasoningDetails = (_g = choice.message.reasoning_details) != null ? _g : [];
3125
+ const usageInfo = response.usage ? computeTokenUsage(response.usage) : emptyUsage();
3126
+ const reasoningDetails = (_a16 = choice.message.reasoning_details) != null ? _a16 : [];
3112
3127
  const reasoning = reasoningDetails.length > 0 ? reasoningDetails.map((detail) => {
3113
3128
  switch (detail.type) {
3114
3129
  case "reasoning.text" /* Text */: {
@@ -3177,9 +3192,9 @@ var OpenRouterChatLanguageModel = class {
3177
3192
  for (const toolCall of choice.message.tool_calls) {
3178
3193
  content.push({
3179
3194
  type: "tool-call",
3180
- toolCallId: (_h = toolCall.id) != null ? _h : generateId(),
3195
+ toolCallId: (_b16 = toolCall.id) != null ? _b16 : generateId(),
3181
3196
  toolName: toolCall.function.name,
3182
- input: (_i = toolCall.function.arguments) != null ? _i : "{}",
3197
+ input: (_c = toolCall.function.arguments) != null ? _c : "{}",
3183
3198
  providerMetadata: !reasoningDetailsAttachedToToolCall ? {
3184
3199
  openrouter: {
3185
3200
  reasoning_details: reasoningDetails
@@ -3206,19 +3221,19 @@ var OpenRouterChatLanguageModel = class {
3206
3221
  sourceType: "url",
3207
3222
  id: annotation.url_citation.url,
3208
3223
  url: annotation.url_citation.url,
3209
- title: (_j = annotation.url_citation.title) != null ? _j : "",
3224
+ title: (_d = annotation.url_citation.title) != null ? _d : "",
3210
3225
  providerMetadata: {
3211
3226
  openrouter: {
3212
- content: (_k = annotation.url_citation.content) != null ? _k : "",
3213
- startIndex: (_l = annotation.url_citation.start_index) != null ? _l : 0,
3214
- endIndex: (_m = annotation.url_citation.end_index) != null ? _m : 0
3227
+ content: (_e = annotation.url_citation.content) != null ? _e : "",
3228
+ startIndex: (_f = annotation.url_citation.start_index) != null ? _f : 0,
3229
+ endIndex: (_g = annotation.url_citation.end_index) != null ? _g : 0
3215
3230
  }
3216
3231
  }
3217
3232
  });
3218
3233
  }
3219
3234
  }
3220
3235
  }
3221
- const fileAnnotations = (_n = choice.message.annotations) == null ? void 0 : _n.filter(
3236
+ const fileAnnotations = (_h = choice.message.annotations) == null ? void 0 : _h.filter(
3222
3237
  (a) => a.type === "file"
3223
3238
  );
3224
3239
  const hasToolCalls = choice.message.tool_calls && choice.message.tool_calls.length > 0;
@@ -3226,7 +3241,7 @@ var OpenRouterChatLanguageModel = class {
3226
3241
  (d) => d.type === "reasoning.encrypted" /* Encrypted */ && d.data
3227
3242
  );
3228
3243
  const shouldOverrideFinishReason = hasToolCalls && hasEncryptedReasoning && choice.finish_reason === "stop";
3229
- const effectiveFinishReason = shouldOverrideFinishReason ? createFinishReason("tool-calls", (_o = choice.finish_reason) != null ? _o : void 0) : mapOpenRouterFinishReason(choice.finish_reason);
3244
+ const effectiveFinishReason = shouldOverrideFinishReason ? createFinishReason("tool-calls", (_i = choice.finish_reason) != null ? _i : void 0) : mapOpenRouterFinishReason(choice.finish_reason);
3230
3245
  return {
3231
3246
  content,
3232
3247
  finishReason: effectiveFinishReason,
@@ -3234,22 +3249,22 @@ var OpenRouterChatLanguageModel = class {
3234
3249
  warnings: [],
3235
3250
  providerMetadata: {
3236
3251
  openrouter: OpenRouterProviderMetadataSchema.parse({
3237
- provider: (_p = response.provider) != null ? _p : "",
3238
- reasoning_details: (_q = choice.message.reasoning_details) != null ? _q : [],
3252
+ provider: (_j = response.provider) != null ? _j : "",
3253
+ reasoning_details: (_k = choice.message.reasoning_details) != null ? _k : [],
3239
3254
  annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : void 0,
3240
3255
  usage: __spreadValues(__spreadValues(__spreadValues(__spreadValues({
3241
- promptTokens: (_r = usageInfo.inputTokens.total) != null ? _r : 0,
3242
- completionTokens: (_s = usageInfo.outputTokens.total) != null ? _s : 0,
3243
- totalTokens: ((_t = usageInfo.inputTokens.total) != null ? _t : 0) + ((_u = usageInfo.outputTokens.total) != null ? _u : 0)
3244
- }, ((_v = response.usage) == null ? void 0 : _v.cost) != null ? { cost: response.usage.cost } : {}), ((_x = (_w = response.usage) == null ? void 0 : _w.prompt_tokens_details) == null ? void 0 : _x.cached_tokens) != null ? {
3256
+ promptTokens: (_l = usageInfo.inputTokens.total) != null ? _l : 0,
3257
+ completionTokens: (_m = usageInfo.outputTokens.total) != null ? _m : 0,
3258
+ totalTokens: ((_n = usageInfo.inputTokens.total) != null ? _n : 0) + ((_o = usageInfo.outputTokens.total) != null ? _o : 0)
3259
+ }, ((_p = response.usage) == null ? void 0 : _p.cost) != null ? { cost: response.usage.cost } : {}), ((_r = (_q = response.usage) == null ? void 0 : _q.prompt_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? {
3245
3260
  promptTokensDetails: {
3246
3261
  cachedTokens: response.usage.prompt_tokens_details.cached_tokens
3247
3262
  }
3248
- } : {}), ((_z = (_y = response.usage) == null ? void 0 : _y.completion_tokens_details) == null ? void 0 : _z.reasoning_tokens) != null ? {
3263
+ } : {}), ((_t = (_s = response.usage) == null ? void 0 : _s.completion_tokens_details) == null ? void 0 : _t.reasoning_tokens) != null ? {
3249
3264
  completionTokensDetails: {
3250
3265
  reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
3251
3266
  }
3252
- } : {}), ((_B = (_A = response.usage) == null ? void 0 : _A.cost_details) == null ? void 0 : _B.upstream_inference_cost) != null ? {
3267
+ } : {}), ((_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? {
3253
3268
  costDetails: {
3254
3269
  upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
3255
3270
  }
@@ -3320,7 +3335,7 @@ var OpenRouterChatLanguageModel = class {
3320
3335
  stream: response.pipeThrough(
3321
3336
  new TransformStream({
3322
3337
  transform(chunk, controller) {
3323
- var _a17, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s;
3338
+ var _a17, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u;
3324
3339
  if (options.includeRawChunks) {
3325
3340
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
3326
3341
  }
@@ -3352,30 +3367,29 @@ var OpenRouterChatLanguageModel = class {
3352
3367
  });
3353
3368
  }
3354
3369
  if (value.usage != null) {
3355
- usage.inputTokens.total = value.usage.prompt_tokens;
3356
- usage.outputTokens.total = value.usage.completion_tokens;
3370
+ const computed = computeTokenUsage(value.usage);
3371
+ Object.assign(usage.inputTokens, computed.inputTokens);
3372
+ Object.assign(usage.outputTokens, computed.outputTokens);
3357
3373
  rawUsage = value.usage;
3358
- openrouterUsage.promptTokens = value.usage.prompt_tokens;
3374
+ const promptTokens = (_a17 = value.usage.prompt_tokens) != null ? _a17 : 0;
3375
+ const completionTokens = (_b16 = value.usage.completion_tokens) != null ? _b16 : 0;
3376
+ openrouterUsage.promptTokens = promptTokens;
3359
3377
  if (value.usage.prompt_tokens_details) {
3360
- const cachedInputTokens = (_a17 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a17 : 0;
3361
- usage.inputTokens.cacheRead = cachedInputTokens;
3362
3378
  openrouterUsage.promptTokensDetails = {
3363
- cachedTokens: cachedInputTokens
3379
+ cachedTokens: (_c = value.usage.prompt_tokens_details.cached_tokens) != null ? _c : 0
3364
3380
  };
3365
3381
  }
3366
- openrouterUsage.completionTokens = value.usage.completion_tokens;
3382
+ openrouterUsage.completionTokens = completionTokens;
3367
3383
  if (value.usage.completion_tokens_details) {
3368
- const reasoningTokens = (_b16 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b16 : 0;
3369
- usage.outputTokens.reasoning = reasoningTokens;
3370
3384
  openrouterUsage.completionTokensDetails = {
3371
- reasoningTokens
3385
+ reasoningTokens: (_d = value.usage.completion_tokens_details.reasoning_tokens) != null ? _d : 0
3372
3386
  };
3373
3387
  }
3374
3388
  if (value.usage.cost != null) {
3375
3389
  openrouterUsage.cost = value.usage.cost;
3376
3390
  }
3377
3391
  openrouterUsage.totalTokens = value.usage.total_tokens;
3378
- const upstreamInferenceCost = (_c = value.usage.cost_details) == null ? void 0 : _c.upstream_inference_cost;
3392
+ const upstreamInferenceCost = (_e = value.usage.cost_details) == null ? void 0 : _e.upstream_inference_cost;
3379
3393
  if (upstreamInferenceCost != null) {
3380
3394
  openrouterUsage.costDetails = {
3381
3395
  upstreamInferenceCost
@@ -3495,12 +3509,12 @@ var OpenRouterChatLanguageModel = class {
3495
3509
  sourceType: "url",
3496
3510
  id: annotation.url_citation.url,
3497
3511
  url: annotation.url_citation.url,
3498
- title: (_d = annotation.url_citation.title) != null ? _d : "",
3512
+ title: (_f = annotation.url_citation.title) != null ? _f : "",
3499
3513
  providerMetadata: {
3500
3514
  openrouter: {
3501
- content: (_e = annotation.url_citation.content) != null ? _e : "",
3502
- startIndex: (_f = annotation.url_citation.start_index) != null ? _f : 0,
3503
- endIndex: (_g = annotation.url_citation.end_index) != null ? _g : 0
3515
+ content: (_g = annotation.url_citation.content) != null ? _g : "",
3516
+ startIndex: (_h = annotation.url_citation.start_index) != null ? _h : 0,
3517
+ endIndex: (_i = annotation.url_citation.end_index) != null ? _i : 0
3504
3518
  }
3505
3519
  }
3506
3520
  });
@@ -3516,7 +3530,7 @@ var OpenRouterChatLanguageModel = class {
3516
3530
  }
3517
3531
  if (delta.tool_calls != null) {
3518
3532
  for (const toolCallDelta of delta.tool_calls) {
3519
- const index = (_h = toolCallDelta.index) != null ? _h : toolCalls.length - 1;
3533
+ const index = (_j = toolCallDelta.index) != null ? _j : toolCalls.length - 1;
3520
3534
  if (toolCalls[index] == null) {
3521
3535
  if (toolCallDelta.type !== "function") {
3522
3536
  throw new InvalidResponseDataError({
@@ -3530,7 +3544,7 @@ var OpenRouterChatLanguageModel = class {
3530
3544
  message: `Expected 'id' to be a string.`
3531
3545
  });
3532
3546
  }
3533
- if (((_i = toolCallDelta.function) == null ? void 0 : _i.name) == null) {
3547
+ if (((_k = toolCallDelta.function) == null ? void 0 : _k.name) == null) {
3534
3548
  throw new InvalidResponseDataError({
3535
3549
  data: toolCallDelta,
3536
3550
  message: `Expected 'function.name' to be a string.`
@@ -3541,7 +3555,7 @@ var OpenRouterChatLanguageModel = class {
3541
3555
  type: "function",
3542
3556
  function: {
3543
3557
  name: toolCallDelta.function.name,
3544
- arguments: (_j = toolCallDelta.function.arguments) != null ? _j : ""
3558
+ arguments: (_l = toolCallDelta.function.arguments) != null ? _l : ""
3545
3559
  },
3546
3560
  inputStarted: false,
3547
3561
  sent: false
@@ -3553,7 +3567,7 @@ var OpenRouterChatLanguageModel = class {
3553
3567
  message: `Tool call at index ${index} is missing after creation.`
3554
3568
  });
3555
3569
  }
3556
- if (((_k = toolCall2.function) == null ? void 0 : _k.name) != null && ((_l = toolCall2.function) == null ? void 0 : _l.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
3570
+ if (((_m = toolCall2.function) == null ? void 0 : _m.name) != null && ((_n = toolCall2.function) == null ? void 0 : _n.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
3557
3571
  toolCall2.inputStarted = true;
3558
3572
  controller.enqueue({
3559
3573
  type: "tool-input-start",
@@ -3604,18 +3618,18 @@ var OpenRouterChatLanguageModel = class {
3604
3618
  toolName: toolCall.function.name
3605
3619
  });
3606
3620
  }
3607
- if (((_m = toolCallDelta.function) == null ? void 0 : _m.arguments) != null) {
3608
- toolCall.function.arguments += (_o = (_n = toolCallDelta.function) == null ? void 0 : _n.arguments) != null ? _o : "";
3621
+ if (((_o = toolCallDelta.function) == null ? void 0 : _o.arguments) != null) {
3622
+ toolCall.function.arguments += (_q = (_p = toolCallDelta.function) == null ? void 0 : _p.arguments) != null ? _q : "";
3609
3623
  }
3610
3624
  controller.enqueue({
3611
3625
  type: "tool-input-delta",
3612
3626
  id: toolCall.id,
3613
- delta: (_p = toolCallDelta.function.arguments) != null ? _p : ""
3627
+ delta: (_r = toolCallDelta.function.arguments) != null ? _r : ""
3614
3628
  });
3615
- if (((_q = toolCall.function) == null ? void 0 : _q.name) != null && ((_r = toolCall.function) == null ? void 0 : _r.arguments) != null && isParsableJson(toolCall.function.arguments)) {
3629
+ if (((_s = toolCall.function) == null ? void 0 : _s.name) != null && ((_t = toolCall.function) == null ? void 0 : _t.arguments) != null && isParsableJson(toolCall.function.arguments)) {
3616
3630
  controller.enqueue({
3617
3631
  type: "tool-call",
3618
- toolCallId: (_s = toolCall.id) != null ? _s : generateId(),
3632
+ toolCallId: (_u = toolCall.id) != null ? _u : generateId(),
3619
3633
  toolName: toolCall.function.name,
3620
3634
  input: toolCall.function.arguments,
3621
3635
  providerMetadata: !reasoningDetailsAttachedToToolCall ? {
@@ -3845,7 +3859,8 @@ var OpenRouterCompletionChunkSchema = z8.union([
3845
3859
  usage: z8.object({
3846
3860
  prompt_tokens: z8.number(),
3847
3861
  prompt_tokens_details: z8.object({
3848
- cached_tokens: z8.number()
3862
+ cached_tokens: z8.number(),
3863
+ cache_write_tokens: z8.number().nullish()
3849
3864
  }).passthrough().nullish(),
3850
3865
  completion_tokens: z8.number(),
3851
3866
  completion_tokens_details: z8.object({
@@ -3935,7 +3950,7 @@ var OpenRouterCompletionLanguageModel = class {
3935
3950
  }, this.config.extraBody), this.settings.extraBody);
3936
3951
  }
3937
3952
  async doGenerate(options) {
3938
- var _a16, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y, _z, _A, _B;
3953
+ var _a16, _b16, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q;
3939
3954
  const providerOptions = options.providerOptions || {};
3940
3955
  const openrouterOptions = providerOptions.openrouter || {};
3941
3956
  const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
@@ -3981,37 +3996,24 @@ var OpenRouterCompletionLanguageModel = class {
3981
3996
  }
3982
3997
  ],
3983
3998
  finishReason: mapOpenRouterFinishReason(choice.finish_reason),
3984
- usage: {
3985
- inputTokens: {
3986
- total: (_c = (_b16 = response.usage) == null ? void 0 : _b16.prompt_tokens) != null ? _c : 0,
3987
- noCache: void 0,
3988
- cacheRead: (_f = (_e = (_d = response.usage) == null ? void 0 : _d.prompt_tokens_details) == null ? void 0 : _e.cached_tokens) != null ? _f : void 0,
3989
- cacheWrite: void 0
3990
- },
3991
- outputTokens: {
3992
- total: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : 0,
3993
- text: void 0,
3994
- reasoning: (_k = (_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens_details) == null ? void 0 : _j.reasoning_tokens) != null ? _k : void 0
3995
- },
3996
- raw: (_l = response.usage) != null ? _l : void 0
3997
- },
3999
+ usage: response.usage ? computeTokenUsage(response.usage) : emptyUsage(),
3998
4000
  warnings: [],
3999
4001
  providerMetadata: {
4000
4002
  openrouter: OpenRouterProviderMetadataSchema.parse({
4001
- provider: (_m = response.provider) != null ? _m : "",
4003
+ provider: (_b16 = response.provider) != null ? _b16 : "",
4002
4004
  usage: __spreadValues(__spreadValues(__spreadValues(__spreadValues({
4003
- promptTokens: (_o = (_n = response.usage) == null ? void 0 : _n.prompt_tokens) != null ? _o : 0,
4004
- completionTokens: (_q = (_p = response.usage) == null ? void 0 : _p.completion_tokens) != null ? _q : 0,
4005
- totalTokens: ((_s = (_r = response.usage) == null ? void 0 : _r.prompt_tokens) != null ? _s : 0) + ((_u = (_t = response.usage) == null ? void 0 : _t.completion_tokens) != null ? _u : 0)
4006
- }, ((_v = response.usage) == null ? void 0 : _v.cost) != null ? { cost: response.usage.cost } : {}), ((_x = (_w = response.usage) == null ? void 0 : _w.prompt_tokens_details) == null ? void 0 : _x.cached_tokens) != null ? {
4005
+ promptTokens: (_d = (_c = response.usage) == null ? void 0 : _c.prompt_tokens) != null ? _d : 0,
4006
+ completionTokens: (_f = (_e = response.usage) == null ? void 0 : _e.completion_tokens) != null ? _f : 0,
4007
+ totalTokens: ((_h = (_g = response.usage) == null ? void 0 : _g.prompt_tokens) != null ? _h : 0) + ((_j = (_i = response.usage) == null ? void 0 : _i.completion_tokens) != null ? _j : 0)
4008
+ }, ((_k = response.usage) == null ? void 0 : _k.cost) != null ? { cost: response.usage.cost } : {}), ((_m = (_l = response.usage) == null ? void 0 : _l.prompt_tokens_details) == null ? void 0 : _m.cached_tokens) != null ? {
4007
4009
  promptTokensDetails: {
4008
4010
  cachedTokens: response.usage.prompt_tokens_details.cached_tokens
4009
4011
  }
4010
- } : {}), ((_z = (_y = response.usage) == null ? void 0 : _y.completion_tokens_details) == null ? void 0 : _z.reasoning_tokens) != null ? {
4012
+ } : {}), ((_o = (_n = response.usage) == null ? void 0 : _n.completion_tokens_details) == null ? void 0 : _o.reasoning_tokens) != null ? {
4011
4013
  completionTokensDetails: {
4012
4014
  reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
4013
4015
  }
4014
- } : {}), ((_B = (_A = response.usage) == null ? void 0 : _A.cost_details) == null ? void 0 : _B.upstream_inference_cost) != null ? {
4016
+ } : {}), ((_q = (_p = response.usage) == null ? void 0 : _p.cost_details) == null ? void 0 : _q.upstream_inference_cost) != null ? {
4015
4017
  costDetails: {
4016
4018
  upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
4017
4019
  }
@@ -4067,7 +4069,7 @@ var OpenRouterCompletionLanguageModel = class {
4067
4069
  stream: response.pipeThrough(
4068
4070
  new TransformStream({
4069
4071
  transform(chunk, controller) {
4070
- var _a16, _b16, _c;
4072
+ var _a16, _b16, _c, _d, _e;
4071
4073
  if (options.includeRawChunks) {
4072
4074
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
4073
4075
  }
@@ -4086,30 +4088,29 @@ var OpenRouterCompletionLanguageModel = class {
4086
4088
  provider = value.provider;
4087
4089
  }
4088
4090
  if (value.usage != null) {
4089
- usage.inputTokens.total = value.usage.prompt_tokens;
4090
- usage.outputTokens.total = value.usage.completion_tokens;
4091
+ const computed = computeTokenUsage(value.usage);
4092
+ Object.assign(usage.inputTokens, computed.inputTokens);
4093
+ Object.assign(usage.outputTokens, computed.outputTokens);
4091
4094
  rawUsage = value.usage;
4092
- openrouterUsage.promptTokens = value.usage.prompt_tokens;
4095
+ const promptTokens = (_a16 = value.usage.prompt_tokens) != null ? _a16 : 0;
4096
+ const completionTokens = (_b16 = value.usage.completion_tokens) != null ? _b16 : 0;
4097
+ openrouterUsage.promptTokens = promptTokens;
4093
4098
  if (value.usage.prompt_tokens_details) {
4094
- const cachedInputTokens = (_a16 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a16 : 0;
4095
- usage.inputTokens.cacheRead = cachedInputTokens;
4096
4099
  openrouterUsage.promptTokensDetails = {
4097
- cachedTokens: cachedInputTokens
4100
+ cachedTokens: (_c = value.usage.prompt_tokens_details.cached_tokens) != null ? _c : 0
4098
4101
  };
4099
4102
  }
4100
- openrouterUsage.completionTokens = value.usage.completion_tokens;
4103
+ openrouterUsage.completionTokens = completionTokens;
4101
4104
  if (value.usage.completion_tokens_details) {
4102
- const reasoningTokens = (_b16 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b16 : 0;
4103
- usage.outputTokens.reasoning = reasoningTokens;
4104
4105
  openrouterUsage.completionTokensDetails = {
4105
- reasoningTokens
4106
+ reasoningTokens: (_d = value.usage.completion_tokens_details.reasoning_tokens) != null ? _d : 0
4106
4107
  };
4107
4108
  }
4108
4109
  if (value.usage.cost != null) {
4109
4110
  openrouterUsage.cost = value.usage.cost;
4110
4111
  }
4111
4112
  openrouterUsage.totalTokens = value.usage.total_tokens;
4112
- const upstreamInferenceCost = (_c = value.usage.cost_details) == null ? void 0 : _c.upstream_inference_cost;
4113
+ const upstreamInferenceCost = (_e = value.usage.cost_details) == null ? void 0 : _e.upstream_inference_cost;
4113
4114
  if (upstreamInferenceCost != null) {
4114
4115
  openrouterUsage.costDetails = {
4115
4116
  upstreamInferenceCost