@openrouter/ai-sdk-provider 0.4.6 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -37,7 +37,7 @@ const { text } = await generateText({
 
 ## Supported models
 
-This list is not a definitive list of models supported by OpenRouter, as it constantly changes as we add new models (and deprecate old ones) to our system.
+This list is not a definitive list of models supported by OpenRouter, as it constantly changes as we add new models (and deprecate old ones) to our system.
 You can find the latest list of models supported by OpenRouter [here](https://openrouter.ai/models).
 
 You can find the latest list of tool-supported models supported by OpenRouter [here](https://openrouter.ai/models?order=newest&supported_parameters=tools). (Note: This list may contain models that are not compatible with the AI SDK.)
@@ -147,10 +147,37 @@ await streamText({
         },
         {
           type: 'text',
-          text: 'Who're the speakers?',
+          text: 'List the speakers?',
         },
       ],
     },
   ],
 });
 ```
+
+## Use Cases
+
+### Usage Accounting
+
+The provider supports [OpenRouter usage accounting](https://openrouter.ai/docs/use-cases/usage-accounting), which allows you to track token usage details directly in your API responses, without making additional API calls.
+
+```typescript
+// Enable usage accounting
+const model = openrouter('openai/gpt-3.5-turbo', {
+  usage: {
+    include: true,
+  }
+});
+
+// Access usage accounting data
+const result = await generateText({
+  model,
+  prompt: 'Hello, how are you today?',
+});
+
+// Provider-specific usage details (available in providerMetadata)
+if (result.providerMetadata?.openrouter?.usage) {
+  console.log('Cost:', result.providerMetadata.openrouter.usage.cost);
+  console.log('Total Tokens:', result.providerMetadata.openrouter.usage.totalTokens);
+}
+```
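The same release wires usage accounting through the streaming path as well (see the `doStream` changes in `package/dist/index.js` below), where the details arrive on the stream's final `finish` part. A minimal sketch of reading them from `streamText`, assuming an `ai` package version whose `onFinish` callback exposes `providerMetadata` (older 3.x releases name the field `experimental_providerMetadata`); the API-key wiring is illustrative:

```typescript
import { createOpenRouter } from '@openrouter/ai-sdk-provider';
import { streamText } from 'ai';

const openrouter = createOpenRouter({
  apiKey: process.env.OPENROUTER_API_KEY, // illustrative; use your own key handling
});

const result = await streamText({
  model: openrouter('openai/gpt-3.5-turbo', {
    usage: { include: true }, // ask OpenRouter to attach usage accounting
  }),
  prompt: 'Hello, how are you today?',
  onFinish({ providerMetadata }) {
    // Populated from the final "finish" stream part when usage accounting is enabled.
    console.log(providerMetadata?.openrouter?.usage);
  },
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```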
package/dist/index.d.mts CHANGED
@@ -27,7 +27,33 @@ type OpenRouterSharedSettings = OpenRouterProviderOptions & {
    * @deprecated use `reasoning` instead
    */
   includeReasoning?: boolean;
-  extraBody?: Record<string, any>;
+  extraBody?: Record<string, unknown>;
+  /**
+   * Enable usage accounting to get detailed token usage information.
+   * https://openrouter.ai/docs/use-cases/usage-accounting
+   */
+  usage?: {
+    /**
+     * When true, includes token usage information in the response.
+     */
+    include: boolean;
+  };
+};
+/**
+ * Usage accounting response
+ * @see https://openrouter.ai/docs/use-cases/usage-accounting
+ */
+type OpenRouterUsageAccounting = {
+  promptTokens: number;
+  promptTokensDetails?: {
+    cachedTokens: number;
+  };
+  completionTokens: number;
+  completionTokensDetails?: {
+    reasoningTokens: number;
+  };
+  totalTokens: number;
+  cost?: number;
 };
 
 type OpenRouterChatModelId = string;
@@ -236,4 +262,4 @@ declare class OpenRouter {
   completion(modelId: OpenRouterCompletionModelId, settings?: OpenRouterCompletionSettings): OpenRouterCompletionLanguageModel;
 }
 
-export { OpenRouter, type OpenRouterCompletionSettings, type OpenRouterLanguageModel, type OpenRouterProvider, type OpenRouterProviderOptions, type OpenRouterProviderSettings, type OpenRouterSharedSettings, createOpenRouter, openrouter };
+export { OpenRouter, type OpenRouterCompletionSettings, type OpenRouterLanguageModel, type OpenRouterProvider, type OpenRouterProviderOptions, type OpenRouterProviderSettings, type OpenRouterSharedSettings, type OpenRouterUsageAccounting, createOpenRouter, openrouter };
package/dist/index.d.ts CHANGED
@@ -27,7 +27,33 @@ type OpenRouterSharedSettings = OpenRouterProviderOptions & {
    * @deprecated use `reasoning` instead
    */
   includeReasoning?: boolean;
-  extraBody?: Record<string, any>;
+  extraBody?: Record<string, unknown>;
+  /**
+   * Enable usage accounting to get detailed token usage information.
+   * https://openrouter.ai/docs/use-cases/usage-accounting
+   */
+  usage?: {
+    /**
+     * When true, includes token usage information in the response.
+     */
+    include: boolean;
+  };
+};
+/**
+ * Usage accounting response
+ * @see https://openrouter.ai/docs/use-cases/usage-accounting
+ */
+type OpenRouterUsageAccounting = {
+  promptTokens: number;
+  promptTokensDetails?: {
+    cachedTokens: number;
+  };
+  completionTokens: number;
+  completionTokensDetails?: {
+    reasoningTokens: number;
+  };
+  totalTokens: number;
+  cost?: number;
 };
 
 type OpenRouterChatModelId = string;
@@ -236,4 +262,4 @@ declare class OpenRouter {
   completion(modelId: OpenRouterCompletionModelId, settings?: OpenRouterCompletionSettings): OpenRouterCompletionLanguageModel;
 }
 
-export { OpenRouter, type OpenRouterCompletionSettings, type OpenRouterLanguageModel, type OpenRouterProvider, type OpenRouterProviderOptions, type OpenRouterProviderSettings, type OpenRouterSharedSettings, createOpenRouter, openrouter };
+export { OpenRouter, type OpenRouterCompletionSettings, type OpenRouterLanguageModel, type OpenRouterProvider, type OpenRouterProviderOptions, type OpenRouterProviderSettings, type OpenRouterSharedSettings, type OpenRouterUsageAccounting, createOpenRouter, openrouter };
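Both declaration files now export `OpenRouterUsageAccounting`, so consumers can type the `providerMetadata.openrouter.usage` payload shown in the README. A small sketch of a typed consumer; `formatUsage` is a hypothetical helper, not part of the package:

```typescript
import type { OpenRouterUsageAccounting } from '@openrouter/ai-sdk-provider';

// Hypothetical helper: render a usage accounting payload as a one-line summary.
function formatUsage(usage: OpenRouterUsageAccounting): string {
  const cached = usage.promptTokensDetails?.cachedTokens ?? 0;
  const reasoning = usage.completionTokensDetails?.reasoningTokens ?? 0;
  return [
    `prompt: ${usage.promptTokens} (cached: ${cached})`,
    `completion: ${usage.completionTokens} (reasoning: ${reasoning})`,
    `total: ${usage.totalTokens}`,
    usage.cost !== undefined ? `cost: ${usage.cost}` : 'cost: n/a',
  ].join(', ');
}
```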
package/dist/index.js CHANGED
@@ -277,7 +277,7 @@ var OpenRouterChatLanguageModel = class {
   }) {
     var _a;
     const type = mode.type;
-    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata["openrouter"]) != null ? _a : {};
+    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.openrouter) != null ? _a : {};
     const baseArgs = __spreadValues(__spreadValues(__spreadValues({
       // model id:
       model: this.modelId,
@@ -302,7 +302,8 @@ var OpenRouterChatLanguageModel = class {
       messages: convertToOpenRouterChatMessages(prompt),
       // OpenRouter specific settings:
       include_reasoning: this.settings.includeReasoning,
-      reasoning: this.settings.reasoning
+      reasoning: this.settings.reasoning,
+      usage: this.settings.usage
     }, this.config.extraBody), this.settings.extraBody), extraCallingBody);
     switch (type) {
       case "regular": {
@@ -338,7 +339,7 @@ var OpenRouterChatLanguageModel = class {
     }
   }
   async doGenerate(options) {
-    var _b, _c, _d, _e, _f, _g, _h;
+    var _b, _c, _d, _e, _f, _g, _h, _i, _j;
     const args = this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
@@ -359,14 +360,39 @@ var OpenRouterChatLanguageModel = class {
     if (!choice) {
       throw new Error("No choice in response");
     }
-    return {
+    const usageInfo = response.usage ? {
+      promptTokens: (_b = response.usage.prompt_tokens) != null ? _b : 0,
+      completionTokens: (_c = response.usage.completion_tokens) != null ? _c : 0
+    } : {
+      promptTokens: 0,
+      completionTokens: 0
+    };
+    const providerMetadata = {};
+    if (response.usage && ((_d = this.settings.usage) == null ? void 0 : _d.include)) {
+      providerMetadata.openrouter = {
+        usage: {
+          promptTokens: response.usage.prompt_tokens,
+          promptTokensDetails: response.usage.prompt_tokens_details ? {
+            cachedTokens: (_e = response.usage.prompt_tokens_details.cached_tokens) != null ? _e : 0
+          } : void 0,
+          completionTokens: response.usage.completion_tokens,
+          completionTokensDetails: response.usage.completion_tokens_details ? {
+            reasoningTokens: (_f = response.usage.completion_tokens_details.reasoning_tokens) != null ? _f : 0
+          } : void 0,
+          cost: response.usage.cost,
+          totalTokens: (_g = response.usage.total_tokens) != null ? _g : 0
+        }
+      };
+    }
+    const hasProviderMetadata = Object.keys(providerMetadata).length > 0;
+    return __spreadValues({
       response: {
         id: response.id,
         modelId: response.model
       },
-      text: (_b = choice.message.content) != null ? _b : void 0,
-      reasoning: (_c = choice.message.reasoning) != null ? _c : void 0,
-      toolCalls: (_d = choice.message.tool_calls) == null ? void 0 : _d.map((toolCall) => {
+      text: (_h = choice.message.content) != null ? _h : void 0,
+      reasoning: (_i = choice.message.reasoning) != null ? _i : void 0,
+      toolCalls: (_j = choice.message.tool_calls) == null ? void 0 : _j.map((toolCall) => {
         var _a2;
         return {
           toolCallType: "function",
@@ -376,17 +402,15 @@ var OpenRouterChatLanguageModel = class {
         };
       }),
       finishReason: mapOpenRouterFinishReason(choice.finish_reason),
-      usage: {
-        promptTokens: (_f = (_e = response.usage) == null ? void 0 : _e.prompt_tokens) != null ? _f : 0,
-        completionTokens: (_h = (_g = response.usage) == null ? void 0 : _g.completion_tokens) != null ? _h : 0
-      },
+      usage: usageInfo,
       rawCall: { rawPrompt, rawSettings },
       rawResponse: { headers: responseHeaders },
       warnings: [],
       logprobs: mapOpenRouterChatLogProbsOutput(choice.logprobs)
-    };
+    }, hasProviderMetadata ? { providerMetadata } : {});
   }
   async doStream(options) {
+    var _a, _c;
     const args = this.getArgs(options);
     const { responseHeaders, value: response } = await (0, import_provider_utils3.postJsonToApi)({
       url: this.config.url({
@@ -397,7 +421,9 @@ var OpenRouterChatLanguageModel = class {
       body: __spreadProps(__spreadValues({}, args), {
         stream: true,
         // only include stream_options when in strict compatibility mode:
-        stream_options: this.config.compatibility === "strict" ? { include_usage: true } : void 0
+        stream_options: this.config.compatibility === "strict" ? __spreadValues({
+          include_usage: true
+        }, ((_a = this.settings.usage) == null ? void 0 : _a.include) ? { include_usage: true } : {}) : void 0
       }),
       failedResponseHandler: openrouterFailedResponseHandler,
       successfulResponseHandler: (0, import_provider_utils3.createEventSourceResponseHandler)(
@@ -406,7 +432,7 @@ var OpenRouterChatLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    const _a = args, { messages: rawPrompt } = _a, rawSettings = __objRest(_a, ["messages"]);
+    const _b = args, { messages: rawPrompt } = _b, rawSettings = __objRest(_b, ["messages"]);
     const toolCalls = [];
     let finishReason = "other";
     let usage = {
@@ -414,11 +440,13 @@ var OpenRouterChatLanguageModel = class {
       completionTokens: Number.NaN
     };
     let logprobs;
+    const openrouterUsage = {};
+    const shouldIncludeUsageAccounting = !!((_c = this.settings.usage) == null ? void 0 : _c.include);
     return {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a2, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
+            var _a2, _b2, _c2, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -447,6 +475,20 @@ var OpenRouterChatLanguageModel = class {
                 promptTokens: value.usage.prompt_tokens,
                 completionTokens: value.usage.completion_tokens
               };
+              openrouterUsage.promptTokens = value.usage.prompt_tokens;
+              if (value.usage.prompt_tokens_details) {
+                openrouterUsage.promptTokensDetails = {
+                  cachedTokens: (_a2 = value.usage.prompt_tokens_details.cached_tokens) != null ? _a2 : 0
+                };
+              }
+              openrouterUsage.completionTokens = value.usage.completion_tokens;
+              if (value.usage.completion_tokens_details) {
+                openrouterUsage.completionTokensDetails = {
+                  reasoningTokens: (_b2 = value.usage.completion_tokens_details.reasoning_tokens) != null ? _b2 : 0
+                };
+              }
+              openrouterUsage.cost = value.usage.cost;
+              openrouterUsage.totalTokens = value.usage.total_tokens;
             }
             const choice = value.choices[0];
             if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -491,7 +533,7 @@ var OpenRouterChatLanguageModel = class {
                     message: `Expected 'id' to be a string.`
                   });
                 }
-                if (((_a2 = toolCallDelta.function) == null ? void 0 : _a2.name) == null) {
+                if (((_c2 = toolCallDelta.function) == null ? void 0 : _c2.name) == null) {
                   throw new import_provider.InvalidResponseDataError({
                     data: toolCallDelta,
                     message: `Expected 'function.name' to be a string.`
@@ -502,7 +544,7 @@ var OpenRouterChatLanguageModel = class {
                   type: "function",
                   function: {
                     name: toolCallDelta.function.name,
-                    arguments: (_b = toolCallDelta.function.arguments) != null ? _b : ""
+                    arguments: (_d = toolCallDelta.function.arguments) != null ? _d : ""
                   },
                   sent: false
                 };
@@ -510,7 +552,7 @@ var OpenRouterChatLanguageModel = class {
                 if (toolCall2 == null) {
                   throw new Error("Tool call is missing");
                 }
-                if (((_c = toolCall2.function) == null ? void 0 : _c.name) != null && ((_d = toolCall2.function) == null ? void 0 : _d.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
+                if (((_e = toolCall2.function) == null ? void 0 : _e.name) != null && ((_f = toolCall2.function) == null ? void 0 : _f.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall2.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call-delta",
                     toolCallType: "function",
@@ -521,7 +563,7 @@ var OpenRouterChatLanguageModel = class {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_e = toolCall2.id) != null ? _e : (0, import_provider_utils3.generateId)(),
+                    toolCallId: (_g = toolCall2.id) != null ? _g : (0, import_provider_utils3.generateId)(),
                     toolName: toolCall2.function.name,
                     args: toolCall2.function.arguments
                   });
@@ -533,21 +575,21 @@ var OpenRouterChatLanguageModel = class {
                 if (toolCall == null) {
                   throw new Error("Tool call is missing");
                 }
-                if (((_f = toolCallDelta.function) == null ? void 0 : _f.arguments) != null) {
-                  toolCall.function.arguments += (_h = (_g = toolCallDelta.function) == null ? void 0 : _g.arguments) != null ? _h : "";
+                if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
+                  toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
                 }
                 controller.enqueue({
                   type: "tool-call-delta",
                   toolCallType: "function",
                   toolCallId: toolCall.id,
                   toolName: toolCall.function.name,
-                  argsTextDelta: (_i = toolCallDelta.function.arguments) != null ? _i : ""
+                  argsTextDelta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
                 });
-                if (((_j = toolCall.function) == null ? void 0 : _j.name) != null && ((_k = toolCall.function) == null ? void 0 : _k.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
+                if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && (0, import_provider_utils3.isParsableJson)(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
                     toolCallType: "function",
-                    toolCallId: (_l = toolCall.id) != null ? _l : (0, import_provider_utils3.generateId)(),
+                    toolCallId: (_n = toolCall.id) != null ? _n : (0, import_provider_utils3.generateId)(),
                     toolName: toolCall.function.name,
                     args: toolCall.function.arguments
                   });
@@ -573,12 +615,19 @@ var OpenRouterChatLanguageModel = class {
                 }
               }
             }
-            controller.enqueue({
+            const providerMetadata = {};
+            if (shouldIncludeUsageAccounting && (openrouterUsage.totalTokens !== void 0 || openrouterUsage.cost !== void 0 || openrouterUsage.promptTokensDetails !== void 0 || openrouterUsage.completionTokensDetails !== void 0)) {
+              providerMetadata.openrouter = {
+                usage: openrouterUsage
+              };
+            }
+            const hasProviderMetadata = Object.keys(providerMetadata).length > 0 && shouldIncludeUsageAccounting;
+            controller.enqueue(__spreadValues({
              type: "finish",
              finishReason,
              logprobs,
              usage
-            });
+            }, hasProviderMetadata ? { providerMetadata } : {}));
           }
         })
       ),
@@ -593,8 +642,15 @@ var OpenRouterChatCompletionBaseResponseSchema = import_zod2.z.object({
   model: import_zod2.z.string().optional(),
   usage: import_zod2.z.object({
     prompt_tokens: import_zod2.z.number(),
+    prompt_tokens_details: import_zod2.z.object({
+      cached_tokens: import_zod2.z.number()
+    }).optional(),
     completion_tokens: import_zod2.z.number(),
-    total_tokens: import_zod2.z.number()
+    completion_tokens_details: import_zod2.z.object({
+      reasoning_tokens: import_zod2.z.number()
+    }).optional(),
+    total_tokens: import_zod2.z.number(),
+    cost: import_zod2.z.number().optional()
   }).nullish()
 });
 var OpenRouterNonStreamChatCompletionResponseSchema = OpenRouterChatCompletionBaseResponseSchema.extend({
@@ -691,14 +747,13 @@ function prepareToolsAndToolChoice(mode) {
           parameters: tool.parameters
         }
       };
-    } else {
-      return {
-        type: "function",
-        function: {
-          name: tool.name
-        }
-      };
     }
+    return {
+      type: "function",
+      function: {
+        name: tool.name
+      }
+    };
   });
   const toolChoice = mode.toolChoice;
   if (toolChoice == null) {
@@ -887,7 +942,7 @@ var OpenRouterCompletionLanguageModel = class {
   }) {
     var _a, _b;
     const type = mode.type;
-    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata["openrouter"]) != null ? _a : {};
+    const extraCallingBody = (_a = providerMetadata == null ? void 0 : providerMetadata.openrouter) != null ? _a : {};
     const { prompt: completionPrompt } = convertToOpenRouterCompletionPrompt({
       prompt,
       inputFormat
@@ -1184,9 +1239,7 @@ function createOpenRouter(options = {}) {
     }
     return createChatModel(modelId, settings);
   };
-  const provider = function(modelId, settings) {
-    return createLanguageModel(modelId, settings);
-  };
+  const provider = (modelId, settings) => createLanguageModel(modelId, settings);
   provider.languageModel = createLanguageModel;
   provider.chat = createChatModel;
   provider.completion = createCompletionModel;
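Taken together, the `index.js` changes forward `usage: { include: true }` from the model settings into the request body and extend the response schema so the richer usage block validates. For orientation, a sketch of the usage object the extended zod schema now accepts, with made-up values (which fields actually appear depends on the model and route):

```typescript
// Illustrative only: a usage block matching the extended response schema.
const exampleUsage = {
  prompt_tokens: 14,
  prompt_tokens_details: { cached_tokens: 0 },
  completion_tokens: 163,
  completion_tokens_details: { reasoning_tokens: 72 },
  total_tokens: 177,
  cost: 0.00034,
};
```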