@openrouter/ai-sdk-provider 1.2.7 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -167,9 +167,134 @@ await streamText({
167
167
  ],
168
168
  });
169
169
  ```
170
+ ## Anthropic Beta Features
171
+
172
+ You can enable Anthropic beta features by passing custom headers through the OpenRouter SDK.
173
+
174
+ ### Fine-grained Tool Streaming
175
+
176
+ [Fine-grained tool streaming](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/fine-grained-tool-streaming) allows streaming tool parameters without buffering, reducing latency for large schemas. This is particularly useful when working with large nested JSON structures.
177
+
178
+ **Important:** This is a beta feature from Anthropic. Make sure to evaluate responses before using in production.
179
+
180
+ #### Basic Usage
181
+
182
+ ```typescript
183
+ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
184
+ import { streamObject } from 'ai';
185
+
186
+ const provider = createOpenRouter({
187
+ apiKey: process.env.OPENROUTER_API_KEY,
188
+ headers: {
189
+ 'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
190
+ },
191
+ });
192
+
193
+ const model = provider.chat('anthropic/claude-sonnet-4');
194
+
195
+ const result = await streamObject({
196
+ model,
197
+ schema: yourLargeSchema,
198
+ prompt: 'Generate a complex object...',
199
+ });
200
+
201
+ for await (const partialObject of result.partialObjectStream) {
202
+ console.log(partialObject);
203
+ }
204
+ ```
205
+
206
+ You can also pass the header at the request level:
207
+
208
+ ```typescript
209
+ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
210
+ import { generateText } from 'ai';
211
+
212
+ const provider = createOpenRouter({
213
+ apiKey: process.env.OPENROUTER_API_KEY,
214
+ });
215
+
216
+ const model = provider.chat('anthropic/claude-sonnet-4');
217
+
218
+ await generateText({
219
+ model,
220
+ prompt: 'Hello',
221
+ headers: {
222
+ 'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
223
+ },
224
+ });
225
+ ```
226
+
227
+ **Note:** Fine-grained tool streaming is specific to Anthropic models. When using models from other providers, the header will be ignored.
228
+
229
+ #### Use Case: Large Component Generation
230
+
231
+ This feature is particularly beneficial when streaming large, nested JSON structures like UI component trees:
232
+
233
+ ```typescript
234
+ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
235
+ import { streamObject } from 'ai';
236
+ import { z } from 'zod';
237
+
238
+ const componentSchema = z.object({
239
+ type: z.string(),
240
+ props: z.record(z.any()),
241
+ children: z.array(z.lazy(() => componentSchema)).optional(),
242
+ });
243
+
244
+ const provider = createOpenRouter({
245
+ apiKey: process.env.OPENROUTER_API_KEY,
246
+ headers: {
247
+ 'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
248
+ },
249
+ });
250
+
251
+ const model = provider.chat('anthropic/claude-sonnet-4');
252
+
253
+ const result = await streamObject({
254
+ model,
255
+ schema: componentSchema,
256
+ prompt: 'Create a responsive dashboard layout',
257
+ });
258
+
259
+ for await (const partialComponent of result.partialObjectStream) {
260
+ console.log('Partial component:', partialComponent);
261
+ }
262
+ ```
263
+
264
+
170
265
 
171
266
  ## Use Cases
172
267
 
268
+ ### Debugging API Requests
269
+
270
+ The provider supports a debug mode that echoes back the request body sent to the upstream provider. This is useful for troubleshooting and understanding how your requests are being processed. Note that debug mode only works with streaming requests.
271
+
272
+ ```typescript
273
+ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
274
+ import { streamText } from 'ai';
275
+
276
+ const openrouter = createOpenRouter({ apiKey: 'your-api-key' });
277
+ const model = openrouter('anthropic/claude-3.5-sonnet', {
278
+ debug: {
279
+ echo_upstream_body: true,
280
+ },
281
+ });
282
+
283
+ const result = await streamText({
284
+ model,
285
+ prompt: 'Hello, how are you?',
286
+ });
287
+
288
+ // The debug data is available in the stream's first chunk
289
+ // and in the final response's providerMetadata
290
+ for await (const chunk of result.fullStream) {
291
+ // Debug chunks have empty choices and contain debug.echo_upstream_body
292
+ console.log(chunk);
293
+ }
294
+ ```
295
+
296
+ The debug response will include the request body that was sent to the upstream provider, with sensitive data redacted (user IDs, base64 content, etc.). This helps you understand how OpenRouter transforms your request before sending it to the model provider.
297
+
173
298
  ### Usage Accounting
174
299
 
175
300
  The provider supports [OpenRouter usage accounting](https://openrouter.ai/docs/use-cases/usage-accounting), which allows you to track token usage details directly in your API responses, without making additional API calls.
@@ -197,3 +322,35 @@ if (result.providerMetadata?.openrouter?.usage) {
197
322
  );
198
323
  }
199
324
  ```
325
+
326
+ It also supports BYOK (Bring Your Own Key) [usage accounting](https://openrouter.ai/docs/guides/usage-accounting#cost-breakdown), which allows you to track passthrough costs when you are using a provider's own API key in your OpenRouter account.
327
+
328
+ ```typescript
329
+ // Assuming you have set an OpenAI API key in https://openrouter.ai/settings/integrations
330
+
331
+ // Enable usage accounting
332
+ const model = openrouter('openai/gpt-3.5-turbo', {
333
+ usage: {
334
+ include: true,
335
+ },
336
+ });
337
+
338
+ // Access usage accounting data
339
+ const result = await generateText({
340
+ model,
341
+ prompt: 'Hello, how are you today?',
342
+ });
343
+
344
+ // Provider-specific BYOK usage details (available in providerMetadata)
345
+ if (result.providerMetadata?.openrouter?.usage) {
346
+ const costDetails = result.providerMetadata.openrouter.usage.costDetails;
347
+ if (costDetails) {
348
+ console.log('BYOK cost:', costDetails.upstreamInferenceCost);
349
+ }
350
+ console.log('OpenRouter credits cost:', result.providerMetadata.openrouter.usage.cost);
351
+ console.log(
352
+ 'Total Tokens:',
353
+ result.providerMetadata.openrouter.usage.totalTokens,
354
+ );
355
+ }
356
+ ```
package/dist/index.d.mts CHANGED
@@ -71,6 +71,19 @@ type OpenRouterChatSettings = {
71
71
  */
72
72
  search_prompt?: string;
73
73
  };
74
+ /**
75
+ * Debug options for troubleshooting API requests.
76
+ * Only works with streaming requests.
77
+ * @see https://openrouter.ai/docs/api-reference/debugging
78
+ */
79
+ debug?: {
80
+ /**
81
+ * When true, echoes back the request body that was sent to the upstream provider.
82
+ * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field.
83
+ * Sensitive data like user IDs and base64 content will be redacted.
84
+ */
85
+ echo_upstream_body?: boolean;
86
+ };
74
87
  /**
75
88
  * Provider routing preferences to control request routing behavior
76
89
  */
@@ -106,7 +119,7 @@ type OpenRouterChatSettings = {
106
119
  /**
107
120
  * Sort providers by price, throughput, or latency
108
121
  */
109
- sort?: models.Sort;
122
+ sort?: models.ProviderSort;
110
123
  /**
111
124
  * Maximum pricing you want to pay for this request
112
125
  */
@@ -173,7 +186,7 @@ type OpenRouterUsageAccounting = {
173
186
  };
174
187
  totalTokens: number;
175
188
  cost?: number;
176
- costDetails: {
189
+ costDetails?: {
177
190
  upstreamInferenceCost: number;
178
191
  };
179
192
  };
package/dist/index.d.ts CHANGED
@@ -71,6 +71,19 @@ type OpenRouterChatSettings = {
71
71
  */
72
72
  search_prompt?: string;
73
73
  };
74
+ /**
75
+ * Debug options for troubleshooting API requests.
76
+ * Only works with streaming requests.
77
+ * @see https://openrouter.ai/docs/api-reference/debugging
78
+ */
79
+ debug?: {
80
+ /**
81
+ * When true, echoes back the request body that was sent to the upstream provider.
82
+ * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field.
83
+ * Sensitive data like user IDs and base64 content will be redacted.
84
+ */
85
+ echo_upstream_body?: boolean;
86
+ };
74
87
  /**
75
88
  * Provider routing preferences to control request routing behavior
76
89
  */
@@ -106,7 +119,7 @@ type OpenRouterChatSettings = {
106
119
  /**
107
120
  * Sort providers by price, throughput, or latency
108
121
  */
109
- sort?: models.Sort;
122
+ sort?: models.ProviderSort;
110
123
  /**
111
124
  * Maximum pricing you want to pay for this request
112
125
  */
@@ -173,7 +186,7 @@ type OpenRouterUsageAccounting = {
173
186
  };
174
187
  totalTokens: number;
175
188
  cost?: number;
176
- costDetails: {
189
+ costDetails?: {
177
190
  upstreamInferenceCost: number;
178
191
  };
179
192
  };
package/dist/index.js CHANGED
@@ -950,9 +950,9 @@ var ReasoningFormat = /* @__PURE__ */ ((ReasoningFormat2) => {
950
950
  // src/schemas/reasoning-details.ts
951
951
  var CommonReasoningDetailSchema = import_v4.z.object({
952
952
  id: import_v4.z.string().nullish(),
953
- format: import_v4.z.nativeEnum(ReasoningFormat).nullish(),
953
+ format: import_v4.z.enum(ReasoningFormat).nullish(),
954
954
  index: import_v4.z.number().optional()
955
- }).passthrough();
955
+ }).loose();
956
956
  var ReasoningDetailSummarySchema = import_v4.z.object({
957
957
  type: import_v4.z.literal("reasoning.summary" /* Summary */),
958
958
  summary: import_v4.z.string()
@@ -1030,7 +1030,7 @@ var OpenRouterProviderMetadataSchema = import_v43.z.object({
1030
1030
  cost: import_v43.z.number().optional(),
1031
1031
  costDetails: import_v43.z.object({
1032
1032
  upstreamInferenceCost: import_v43.z.number()
1033
- }).passthrough()
1033
+ }).passthrough().optional()
1034
1034
  }).passthrough()
1035
1035
  }).passthrough();
1036
1036
  var OpenRouterProviderOptionsSchema = import_v43.z.object({
@@ -1149,9 +1149,8 @@ function getCacheControl(providerMetadata) {
1149
1149
  return (_c = (_b = (_a15 = openrouter2 == null ? void 0 : openrouter2.cacheControl) != null ? _a15 : openrouter2 == null ? void 0 : openrouter2.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
1150
1150
  }
1151
1151
  function convertToOpenRouterChatMessages(prompt) {
1152
- var _a15, _b, _c, _d, _e, _f;
1152
+ var _a15, _b, _c, _d, _e, _f, _g, _h;
1153
1153
  const messages = [];
1154
- const accumulatedReasoningDetails = [];
1155
1154
  for (const { role, content, providerOptions } of prompt) {
1156
1155
  switch (role) {
1157
1156
  case "system": {
@@ -1181,7 +1180,7 @@ function convertToOpenRouterChatMessages(prompt) {
1181
1180
  const messageCacheControl = getCacheControl(providerOptions);
1182
1181
  const contentParts = content.map(
1183
1182
  (part) => {
1184
- var _a16, _b2, _c2, _d2, _e2, _f2, _g;
1183
+ var _a16, _b2, _c2, _d2, _e2, _f2, _g2;
1185
1184
  const cacheControl = (_a16 = getCacheControl(part.providerOptions)) != null ? _a16 : messageCacheControl;
1186
1185
  switch (part.type) {
1187
1186
  case "text":
@@ -1214,7 +1213,7 @@ function convertToOpenRouterChatMessages(prompt) {
1214
1213
  };
1215
1214
  }
1216
1215
  const fileName = String(
1217
- (_g = (_f2 = (_e2 = (_d2 = part.providerOptions) == null ? void 0 : _d2.openrouter) == null ? void 0 : _e2.filename) != null ? _f2 : part.filename) != null ? _g : ""
1216
+ (_g2 = (_f2 = (_e2 = (_d2 = part.providerOptions) == null ? void 0 : _d2.openrouter) == null ? void 0 : _e2.filename) != null ? _f2 : part.filename) != null ? _g2 : ""
1218
1217
  );
1219
1218
  const fileData = getFileUrl({
1220
1219
  part,
@@ -1261,6 +1260,7 @@ function convertToOpenRouterChatMessages(prompt) {
1261
1260
  let text = "";
1262
1261
  let reasoning = "";
1263
1262
  const toolCalls = [];
1263
+ const accumulatedReasoningDetails = [];
1264
1264
  for (const part of content) {
1265
1265
  switch (part.type) {
1266
1266
  case "text": {
@@ -1286,6 +1286,12 @@ function convertToOpenRouterChatMessages(prompt) {
1286
1286
  }
1287
1287
  case "reasoning": {
1288
1288
  reasoning += part.text;
1289
+ const parsedPartProviderOptions = OpenRouterProviderOptionsSchema.safeParse(part.providerOptions);
1290
+ if (parsedPartProviderOptions.success && ((_e = (_d = parsedPartProviderOptions.data) == null ? void 0 : _d.openrouter) == null ? void 0 : _e.reasoning_details)) {
1291
+ accumulatedReasoningDetails.push(
1292
+ ...parsedPartProviderOptions.data.openrouter.reasoning_details
1293
+ );
1294
+ }
1289
1295
  break;
1290
1296
  }
1291
1297
  case "file":
@@ -1296,7 +1302,7 @@ function convertToOpenRouterChatMessages(prompt) {
1296
1302
  }
1297
1303
  }
1298
1304
  const parsedProviderOptions = OpenRouterProviderOptionsSchema.safeParse(providerOptions);
1299
- const messageReasoningDetails = parsedProviderOptions.success ? (_e = (_d = parsedProviderOptions.data) == null ? void 0 : _d.openrouter) == null ? void 0 : _e.reasoning_details : void 0;
1305
+ const messageReasoningDetails = parsedProviderOptions.success ? (_g = (_f = parsedProviderOptions.data) == null ? void 0 : _f.openrouter) == null ? void 0 : _g.reasoning_details : void 0;
1300
1306
  const finalReasoningDetails = messageReasoningDetails && Array.isArray(messageReasoningDetails) && messageReasoningDetails.length > 0 ? messageReasoningDetails : accumulatedReasoningDetails.length > 0 ? accumulatedReasoningDetails : void 0;
1301
1307
  messages.push({
1302
1308
  role: "assistant",
@@ -1315,7 +1321,7 @@ function convertToOpenRouterChatMessages(prompt) {
1315
1321
  role: "tool",
1316
1322
  tool_call_id: toolResponse.toolCallId,
1317
1323
  content: content2,
1318
- cache_control: (_f = getCacheControl(providerOptions)) != null ? _f : getCacheControl(toolResponse.providerOptions)
1324
+ cache_control: (_h = getCacheControl(providerOptions)) != null ? _h : getCacheControl(toolResponse.providerOptions)
1319
1325
  });
1320
1326
  }
1321
1327
  break;
@@ -1631,7 +1637,9 @@ var OpenRouterChatLanguageModel = class {
1631
1637
  plugins: this.settings.plugins,
1632
1638
  web_search_options: this.settings.web_search_options,
1633
1639
  // Provider routing settings:
1634
- provider: this.settings.provider
1640
+ provider: this.settings.provider,
1641
+ // Debug settings:
1642
+ debug: this.settings.debug
1635
1643
  }, this.config.extraBody), this.settings.extraBody);
1636
1644
  if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null) {
1637
1645
  return __spreadProps(__spreadValues({}, baseArgs), {
@@ -1666,7 +1674,7 @@ var OpenRouterChatLanguageModel = class {
1666
1674
  return baseArgs;
1667
1675
  }
1668
1676
  async doGenerate(options) {
1669
- var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w, _x, _y;
1677
+ var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v;
1670
1678
  const providerOptions = options.providerOptions || {};
1671
1679
  const openrouterOptions = providerOptions.openrouter || {};
1672
1680
  const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
@@ -1833,21 +1841,24 @@ var OpenRouterChatLanguageModel = class {
1833
1841
  openrouter: OpenRouterProviderMetadataSchema.parse({
1834
1842
  provider: (_k = response.provider) != null ? _k : "",
1835
1843
  reasoning_details: (_l = choice.message.reasoning_details) != null ? _l : [],
1836
- usage: {
1844
+ usage: __spreadValues(__spreadValues(__spreadValues({
1837
1845
  promptTokens: (_m = usageInfo.inputTokens) != null ? _m : 0,
1838
1846
  completionTokens: (_n = usageInfo.outputTokens) != null ? _n : 0,
1839
1847
  totalTokens: (_o = usageInfo.totalTokens) != null ? _o : 0,
1840
- cost: (_p = response.usage) == null ? void 0 : _p.cost,
1848
+ cost: (_p = response.usage) == null ? void 0 : _p.cost
1849
+ }, ((_r = (_q = response.usage) == null ? void 0 : _q.prompt_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? {
1841
1850
  promptTokensDetails: {
1842
- cachedTokens: (_s = (_r = (_q = response.usage) == null ? void 0 : _q.prompt_tokens_details) == null ? void 0 : _r.cached_tokens) != null ? _s : 0
1843
- },
1851
+ cachedTokens: response.usage.prompt_tokens_details.cached_tokens
1852
+ }
1853
+ } : {}), ((_t = (_s = response.usage) == null ? void 0 : _s.completion_tokens_details) == null ? void 0 : _t.reasoning_tokens) != null ? {
1844
1854
  completionTokensDetails: {
1845
- reasoningTokens: (_v = (_u = (_t = response.usage) == null ? void 0 : _t.completion_tokens_details) == null ? void 0 : _u.reasoning_tokens) != null ? _v : 0
1846
- },
1855
+ reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
1856
+ }
1857
+ } : {}), ((_v = (_u = response.usage) == null ? void 0 : _u.cost_details) == null ? void 0 : _v.upstream_inference_cost) != null ? {
1847
1858
  costDetails: {
1848
- upstreamInferenceCost: (_y = (_x = (_w = response.usage) == null ? void 0 : _w.cost_details) == null ? void 0 : _x.upstream_inference_cost) != null ? _y : 0
1859
+ upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
1849
1860
  }
1850
- }
1861
+ } : {})
1851
1862
  })
1852
1863
  },
1853
1864
  request: { body: args },
@@ -1904,7 +1915,7 @@ var OpenRouterChatLanguageModel = class {
1904
1915
  stream: response.pipeThrough(
1905
1916
  new TransformStream({
1906
1917
  transform(chunk, controller) {
1907
- var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
1918
+ var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
1908
1919
  if (!chunk.success) {
1909
1920
  finishReason = "error";
1910
1921
  controller.enqueue({ type: "error", error: chunk.error });
@@ -1954,6 +1965,12 @@ var OpenRouterChatLanguageModel = class {
1954
1965
  }
1955
1966
  openrouterUsage.cost = value.usage.cost;
1956
1967
  openrouterUsage.totalTokens = value.usage.total_tokens;
1968
+ const upstreamInferenceCost = (_c = value.usage.cost_details) == null ? void 0 : _c.upstream_inference_cost;
1969
+ if (upstreamInferenceCost != null) {
1970
+ openrouterUsage.costDetails = {
1971
+ upstreamInferenceCost
1972
+ };
1973
+ }
1957
1974
  }
1958
1975
  const choice = value.choices[0];
1959
1976
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1963,16 +1980,18 @@ var OpenRouterChatLanguageModel = class {
1963
1980
  return;
1964
1981
  }
1965
1982
  const delta = choice.delta;
1966
- const emitReasoningChunk = (chunkText) => {
1983
+ const emitReasoningChunk = (chunkText, providerMetadata) => {
1967
1984
  if (!reasoningStarted) {
1968
1985
  reasoningId = openrouterResponseId || generateId();
1969
1986
  controller.enqueue({
1987
+ providerMetadata,
1970
1988
  type: "reasoning-start",
1971
1989
  id: reasoningId
1972
1990
  });
1973
1991
  reasoningStarted = true;
1974
1992
  }
1975
1993
  controller.enqueue({
1994
+ providerMetadata,
1976
1995
  type: "reasoning-delta",
1977
1996
  delta: chunkText,
1978
1997
  id: reasoningId || generateId()
@@ -1984,6 +2003,8 @@ var OpenRouterChatLanguageModel = class {
1984
2003
  const lastDetail = accumulatedReasoningDetails[accumulatedReasoningDetails.length - 1];
1985
2004
  if ((lastDetail == null ? void 0 : lastDetail.type) === "reasoning.text" /* Text */) {
1986
2005
  lastDetail.text = (lastDetail.text || "") + (detail.text || "");
2006
+ lastDetail.signature = lastDetail.signature || detail.signature;
2007
+ lastDetail.format = lastDetail.format || detail.format;
1987
2008
  } else {
1988
2009
  accumulatedReasoningDetails.push(__spreadValues({}, detail));
1989
2010
  }
@@ -1991,23 +2012,28 @@ var OpenRouterChatLanguageModel = class {
1991
2012
  accumulatedReasoningDetails.push(detail);
1992
2013
  }
1993
2014
  }
2015
+ const reasoningMetadata = {
2016
+ openrouter: {
2017
+ reasoning_details: delta.reasoning_details
2018
+ }
2019
+ };
1994
2020
  for (const detail of delta.reasoning_details) {
1995
2021
  switch (detail.type) {
1996
2022
  case "reasoning.text" /* Text */: {
1997
2023
  if (detail.text) {
1998
- emitReasoningChunk(detail.text);
2024
+ emitReasoningChunk(detail.text, reasoningMetadata);
1999
2025
  }
2000
2026
  break;
2001
2027
  }
2002
2028
  case "reasoning.encrypted" /* Encrypted */: {
2003
2029
  if (detail.data) {
2004
- emitReasoningChunk("[REDACTED]");
2030
+ emitReasoningChunk("[REDACTED]", reasoningMetadata);
2005
2031
  }
2006
2032
  break;
2007
2033
  }
2008
2034
  case "reasoning.summary" /* Summary */: {
2009
2035
  if (detail.summary) {
2010
- emitReasoningChunk(detail.summary);
2036
+ emitReasoningChunk(detail.summary, reasoningMetadata);
2011
2037
  }
2012
2038
  break;
2013
2039
  }
@@ -2062,7 +2088,7 @@ var OpenRouterChatLanguageModel = class {
2062
2088
  }
2063
2089
  if (delta.tool_calls != null) {
2064
2090
  for (const toolCallDelta of delta.tool_calls) {
2065
- const index = (_c = toolCallDelta.index) != null ? _c : toolCalls.length - 1;
2091
+ const index = (_d = toolCallDelta.index) != null ? _d : toolCalls.length - 1;
2066
2092
  if (toolCalls[index] == null) {
2067
2093
  if (toolCallDelta.type !== "function") {
2068
2094
  throw new InvalidResponseDataError({
@@ -2076,7 +2102,7 @@ var OpenRouterChatLanguageModel = class {
2076
2102
  message: `Expected 'id' to be a string.`
2077
2103
  });
2078
2104
  }
2079
- if (((_d = toolCallDelta.function) == null ? void 0 : _d.name) == null) {
2105
+ if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
2080
2106
  throw new InvalidResponseDataError({
2081
2107
  data: toolCallDelta,
2082
2108
  message: `Expected 'function.name' to be a string.`
@@ -2087,7 +2113,7 @@ var OpenRouterChatLanguageModel = class {
2087
2113
  type: "function",
2088
2114
  function: {
2089
2115
  name: toolCallDelta.function.name,
2090
- arguments: (_e = toolCallDelta.function.arguments) != null ? _e : ""
2116
+ arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
2091
2117
  },
2092
2118
  inputStarted: false,
2093
2119
  sent: false
@@ -2099,7 +2125,7 @@ var OpenRouterChatLanguageModel = class {
2099
2125
  message: `Tool call at index ${index} is missing after creation.`
2100
2126
  });
2101
2127
  }
2102
- if (((_f = toolCall2.function) == null ? void 0 : _f.name) != null && ((_g = toolCall2.function) == null ? void 0 : _g.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
2128
+ if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
2103
2129
  toolCall2.inputStarted = true;
2104
2130
  controller.enqueue({
2105
2131
  type: "tool-input-start",
@@ -2149,18 +2175,18 @@ var OpenRouterChatLanguageModel = class {
2149
2175
  toolName: toolCall.function.name
2150
2176
  });
2151
2177
  }
2152
- if (((_h = toolCallDelta.function) == null ? void 0 : _h.arguments) != null) {
2153
- toolCall.function.arguments += (_j = (_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null ? _j : "";
2178
+ if (((_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null) {
2179
+ toolCall.function.arguments += (_k = (_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null ? _k : "";
2154
2180
  }
2155
2181
  controller.enqueue({
2156
2182
  type: "tool-input-delta",
2157
2183
  id: toolCall.id,
2158
- delta: (_k = toolCallDelta.function.arguments) != null ? _k : ""
2184
+ delta: (_l = toolCallDelta.function.arguments) != null ? _l : ""
2159
2185
  });
2160
- if (((_l = toolCall.function) == null ? void 0 : _l.name) != null && ((_m = toolCall.function) == null ? void 0 : _m.arguments) != null && isParsableJson(toolCall.function.arguments)) {
2186
+ if (((_m = toolCall.function) == null ? void 0 : _m.name) != null && ((_n = toolCall.function) == null ? void 0 : _n.arguments) != null && isParsableJson(toolCall.function.arguments)) {
2161
2187
  controller.enqueue({
2162
2188
  type: "tool-call",
2163
- toolCallId: (_n = toolCall.id) != null ? _n : generateId(),
2189
+ toolCallId: (_o = toolCall.id) != null ? _o : generateId(),
2164
2190
  toolName: toolCall.function.name,
2165
2191
  input: toolCall.function.arguments,
2166
2192
  providerMetadata: {
@@ -2376,7 +2402,10 @@ var OpenRouterCompletionChunkSchema = import_v47.z.union([
2376
2402
  reasoning_tokens: import_v47.z.number()
2377
2403
  }).passthrough().nullish(),
2378
2404
  total_tokens: import_v47.z.number(),
2379
- cost: import_v47.z.number().optional()
2405
+ cost: import_v47.z.number().optional(),
2406
+ cost_details: import_v47.z.object({
2407
+ upstream_inference_cost: import_v47.z.number().nullish()
2408
+ }).passthrough().nullish()
2380
2409
  }).passthrough().nullish()
2381
2410
  }).passthrough(),
2382
2411
  OpenRouterErrorResponseSchema
@@ -2549,7 +2578,7 @@ var OpenRouterCompletionLanguageModel = class {
2549
2578
  stream: response.pipeThrough(
2550
2579
  new TransformStream({
2551
2580
  transform(chunk, controller) {
2552
- var _a15, _b;
2581
+ var _a15, _b, _c;
2553
2582
  if (!chunk.success) {
2554
2583
  finishReason = "error";
2555
2584
  controller.enqueue({ type: "error", error: chunk.error });
@@ -2583,6 +2612,12 @@ var OpenRouterCompletionLanguageModel = class {
2583
2612
  }
2584
2613
  openrouterUsage.cost = value.usage.cost;
2585
2614
  openrouterUsage.totalTokens = value.usage.total_tokens;
2615
+ const upstreamInferenceCost = (_c = value.usage.cost_details) == null ? void 0 : _c.upstream_inference_cost;
2616
+ if (upstreamInferenceCost != null) {
2617
+ openrouterUsage.costDetails = {
2618
+ upstreamInferenceCost
2619
+ };
2620
+ }
2586
2621
  }
2587
2622
  const choice = value.choices[0];
2588
2623
  if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -2678,7 +2713,7 @@ function withUserAgentSuffix(headers, ...userAgentSuffixParts) {
2678
2713
  }
2679
2714
 
2680
2715
  // src/version.ts
2681
- var VERSION = false ? "0.0.0-test" : "1.2.7";
2716
+ var VERSION = false ? "0.0.0-test" : "1.3.0";
2682
2717
 
2683
2718
  // src/provider.ts
2684
2719
  function createOpenRouter(options = {}) {