@openrouter/ai-sdk-provider 1.2.8 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +157 -0
- package/dist/index.d.mts +17 -2
- package/dist/index.d.ts +17 -2
- package/dist/index.js +96 -40
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +96 -40
- package/dist/index.mjs.map +1 -1
- package/dist/internal/index.d.mts +17 -2
- package/dist/internal/index.d.ts +17 -2
- package/dist/internal/index.js +95 -39
- package/dist/internal/index.js.map +1 -1
- package/dist/internal/index.mjs +95 -39
- package/dist/internal/index.mjs.map +1 -1
- package/package.json +3 -3
package/README.md
CHANGED
@@ -167,9 +167,134 @@ await streamText({
     ],
   });
   ```
+
+## Anthropic Beta Features
+
+You can enable Anthropic beta features by passing custom headers through the OpenRouter SDK.
+
+### Fine-grained Tool Streaming
+
+[Fine-grained tool streaming](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/fine-grained-tool-streaming) allows streaming tool parameters without buffering, reducing latency for large schemas. This is particularly useful when working with large nested JSON structures.
+
+**Important:** This is a beta feature from Anthropic. Make sure to evaluate responses before using in production.
+
+#### Basic Usage
+
+```typescript
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+import { streamObject } from 'ai';
+
+const provider = createOpenRouter({
+  apiKey: process.env.OPENROUTER_API_KEY,
+  headers: {
+    'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
+  },
+});
+
+const model = provider.chat('anthropic/claude-sonnet-4');
+
+const result = await streamObject({
+  model,
+  schema: yourLargeSchema,
+  prompt: 'Generate a complex object...',
+});
+
+for await (const partialObject of result.partialObjectStream) {
+  console.log(partialObject);
+}
+```
+
+You can also pass the header at the request level:
+
+```typescript
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+import { generateText } from 'ai';
+
+const provider = createOpenRouter({
+  apiKey: process.env.OPENROUTER_API_KEY,
+});
+
+const model = provider.chat('anthropic/claude-sonnet-4');
+
+await generateText({
+  model,
+  prompt: 'Hello',
+  headers: {
+    'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
+  },
+});
+```
+
+**Note:** Fine-grained tool streaming is specific to Anthropic models. When using models from other providers, the header will be ignored.
+
+#### Use Case: Large Component Generation
+
+This feature is particularly beneficial when streaming large, nested JSON structures like UI component trees:
+
+```typescript
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+import { streamObject } from 'ai';
+import { z } from 'zod';
+
+const componentSchema = z.object({
+  type: z.string(),
+  props: z.record(z.any()),
+  children: z.array(z.lazy(() => componentSchema)).optional(),
+});
+
+const provider = createOpenRouter({
+  apiKey: process.env.OPENROUTER_API_KEY,
+  headers: {
+    'anthropic-beta': 'fine-grained-tool-streaming-2025-05-14',
+  },
+});
+
+const model = provider.chat('anthropic/claude-sonnet-4');
+
+const result = await streamObject({
+  model,
+  schema: componentSchema,
+  prompt: 'Create a responsive dashboard layout',
+});
+
+for await (const partialComponent of result.partialObjectStream) {
+  console.log('Partial component:', partialComponent);
+}
+```
+
+
 
 ## Use Cases
 
+### Debugging API Requests
+
+The provider supports a debug mode that echoes back the request body sent to the upstream provider. This is useful for troubleshooting and understanding how your requests are being processed. Note that debug mode only works with streaming requests.
+
+```typescript
+import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+import { streamText } from 'ai';
+
+const openrouter = createOpenRouter({ apiKey: 'your-api-key' });
+const model = openrouter('anthropic/claude-3.5-sonnet', {
+  debug: {
+    echo_upstream_body: true,
+  },
+});
+
+const result = await streamText({
+  model,
+  prompt: 'Hello, how are you?',
+});
+
+// The debug data is available in the stream's first chunk
+// and in the final response's providerMetadata
+for await (const chunk of result.fullStream) {
+  // Debug chunks have empty choices and contain debug.echo_upstream_body
+  console.log(chunk);
+}
+```
+
+The debug response will include the request body that was sent to the upstream provider, with sensitive data redacted (user IDs, base64 content, etc.). This helps you understand how OpenRouter transforms your request before sending it to the model provider.
+
 ### Usage Accounting
 
 The provider supports [OpenRouter usage accounting](https://openrouter.ai/docs/use-cases/usage-accounting), which allows you to track token usage details directly in your API responses, without making additional API calls.
@@ -197,3 +322,35 @@ if (result.providerMetadata?.openrouter?.usage) {
   );
 }
 ```
+
+It also supports BYOK (Bring Your Own Key) [usage accounting](https://openrouter.ai/docs/docs/guides/usage-accounting#cost-breakdown), which allows you to track passthrough costs when you are using a provider's own API key in your OpenRouter account.
+
+```typescript
+// Assuming you have set an OpenAI API key in https://openrouter.ai/settings/integrations
+
+// Enable usage accounting
+const model = openrouter('openai/gpt-3.5-turbo', {
+  usage: {
+    include: true,
+  },
+});
+
+// Access usage accounting data
+const result = await generateText({
+  model,
+  prompt: 'Hello, how are you today?',
+});
+
+// Provider-specific BYOK usage details (available in providerMetadata)
+if (result.providerMetadata?.openrouter?.usage) {
+  const costDetails = result.providerMetadata.openrouter.usage.costDetails;
+  if (costDetails) {
+    console.log('BYOK cost:', costDetails.upstreamInferenceCost);
+  }
+  console.log('OpenRouter credits cost:', result.providerMetadata.openrouter.usage.cost);
+  console.log(
+    'Total Tokens:',
+    result.providerMetadata.openrouter.usage.totalTokens,
+  );
+}
+```
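
One caveat in the recursive `componentSchema` shown in the README diff above: TypeScript cannot infer the type of a schema that references itself through `z.lazy`, so under strict compiler settings the example needs an explicit annotation. A minimal sketch of the standard zod pattern (the `Component` type name is illustrative, not from the package):

```typescript
import { z } from 'zod';

// An explicit interface breaks the circular inference that `z.lazy` alone cannot resolve.
type Component = {
  type: string;
  props: Record<string, unknown>;
  children?: Component[];
};

const componentSchema: z.ZodType<Component> = z.object({
  type: z.string(),
  props: z.record(z.any()),
  children: z.array(z.lazy(() => componentSchema)).optional(),
});
```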
package/dist/index.d.mts
CHANGED
@@ -71,6 +71,19 @@ type OpenRouterChatSettings = {
          */
         search_prompt?: string;
     };
+    /**
+     * Debug options for troubleshooting API requests.
+     * Only works with streaming requests.
+     * @see https://openrouter.ai/docs/api-reference/debugging
+     */
+    debug?: {
+        /**
+         * When true, echoes back the request body that was sent to the upstream provider.
+         * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field.
+         * Sensitive data like user IDs and base64 content will be redacted.
+         */
+        echo_upstream_body?: boolean;
+    };
     /**
      * Provider routing preferences to control request routing behavior
      */
@@ -106,7 +119,7 @@ type OpenRouterChatSettings = {
         /**
          * Sort providers by price, throughput, or latency
          */
-        sort?: models.
+        sort?: models.ProviderSort;
         /**
          * Maximum pricing you want to pay for this request
          */
@@ -173,7 +186,7 @@ type OpenRouterUsageAccounting = {
     };
     totalTokens: number;
     cost?: number;
-    costDetails
+    costDetails?: {
         upstreamInferenceCost: number;
     };
 };
@@ -264,6 +277,7 @@ declare class OpenRouterChatLanguageModel implements LanguageModelV2 {
     readonly provider = "openrouter";
     readonly defaultObjectGenerationMode: "tool";
     readonly modelId: OpenRouterChatModelId;
+    readonly supportsImageUrls = true;
     readonly supportedUrls: Record<string, RegExp[]>;
     readonly settings: OpenRouterChatSettings;
     private readonly config;
@@ -317,6 +331,7 @@ declare class OpenRouterCompletionLanguageModel implements LanguageModelV2 {
     readonly specificationVersion: "v2";
     readonly provider = "openrouter";
     readonly modelId: OpenRouterCompletionModelId;
+    readonly supportsImageUrls = true;
     readonly supportedUrls: Record<string, RegExp[]>;
     readonly defaultObjectGenerationMode: undefined;
     readonly settings: OpenRouterCompletionSettings;
package/dist/index.d.ts
CHANGED
@@ -71,6 +71,19 @@ type OpenRouterChatSettings = {
          */
         search_prompt?: string;
     };
+    /**
+     * Debug options for troubleshooting API requests.
+     * Only works with streaming requests.
+     * @see https://openrouter.ai/docs/api-reference/debugging
+     */
+    debug?: {
+        /**
+         * When true, echoes back the request body that was sent to the upstream provider.
+         * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field.
+         * Sensitive data like user IDs and base64 content will be redacted.
+         */
+        echo_upstream_body?: boolean;
+    };
     /**
      * Provider routing preferences to control request routing behavior
      */
@@ -106,7 +119,7 @@ type OpenRouterChatSettings = {
         /**
          * Sort providers by price, throughput, or latency
          */
-        sort?: models.
+        sort?: models.ProviderSort;
         /**
          * Maximum pricing you want to pay for this request
          */
@@ -173,7 +186,7 @@ type OpenRouterUsageAccounting = {
     };
     totalTokens: number;
     cost?: number;
-    costDetails
+    costDetails?: {
         upstreamInferenceCost: number;
     };
 };
@@ -264,6 +277,7 @@ declare class OpenRouterChatLanguageModel implements LanguageModelV2 {
     readonly provider = "openrouter";
     readonly defaultObjectGenerationMode: "tool";
     readonly modelId: OpenRouterChatModelId;
+    readonly supportsImageUrls = true;
     readonly supportedUrls: Record<string, RegExp[]>;
     readonly settings: OpenRouterChatSettings;
     private readonly config;
@@ -317,6 +331,7 @@ declare class OpenRouterCompletionLanguageModel implements LanguageModelV2 {
     readonly specificationVersion: "v2";
     readonly provider = "openrouter";
     readonly modelId: OpenRouterCompletionModelId;
+    readonly supportsImageUrls = true;
     readonly supportedUrls: Record<string, RegExp[]>;
     readonly defaultObjectGenerationMode: undefined;
     readonly settings: OpenRouterCompletionSettings;
package/dist/index.js
CHANGED
@@ -1014,9 +1014,23 @@ var openrouterFailedResponseHandler = createJsonErrorResponseHandler({
 
 // src/schemas/provider-metadata.ts
 var import_v43 = require("zod/v4");
+var FileAnnotationSchema = import_v43.z.object({
+  type: import_v43.z.literal("file"),
+  file: import_v43.z.object({
+    hash: import_v43.z.string(),
+    name: import_v43.z.string(),
+    content: import_v43.z.array(
+      import_v43.z.object({
+        type: import_v43.z.string(),
+        text: import_v43.z.string().optional()
+      }).passthrough()
+    ).optional()
+  }).passthrough()
+});
 var OpenRouterProviderMetadataSchema = import_v43.z.object({
   provider: import_v43.z.string(),
   reasoning_details: import_v43.z.array(ReasoningDetailUnionSchema).optional(),
+  annotations: import_v43.z.array(FileAnnotationSchema).optional(),
   usage: import_v43.z.object({
     promptTokens: import_v43.z.number(),
     promptTokensDetails: import_v43.z.object({
@@ -1030,12 +1044,13 @@ var OpenRouterProviderMetadataSchema = import_v43.z.object({
     cost: import_v43.z.number().optional(),
     costDetails: import_v43.z.object({
       upstreamInferenceCost: import_v43.z.number()
-    }).passthrough()
+    }).passthrough().optional()
   }).passthrough()
 }).passthrough();
 var OpenRouterProviderOptionsSchema = import_v43.z.object({
   openrouter: import_v43.z.object({
-    reasoning_details: import_v43.z.array(ReasoningDetailUnionSchema).optional()
+    reasoning_details: import_v43.z.array(ReasoningDetailUnionSchema).optional(),
+    annotations: import_v43.z.array(FileAnnotationSchema).optional()
   }).optional()
 }).optional();
 
@@ -1149,9 +1164,8 @@ function getCacheControl(providerMetadata) {
   return (_c = (_b = (_a15 = openrouter2 == null ? void 0 : openrouter2.cacheControl) != null ? _a15 : openrouter2 == null ? void 0 : openrouter2.cache_control) != null ? _b : anthropic == null ? void 0 : anthropic.cacheControl) != null ? _c : anthropic == null ? void 0 : anthropic.cache_control;
 }
 function convertToOpenRouterChatMessages(prompt) {
-  var _a15, _b, _c, _d, _e, _f;
+  var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j;
   const messages = [];
-  const accumulatedReasoningDetails = [];
   for (const { role, content, providerOptions } of prompt) {
     switch (role) {
       case "system": {
@@ -1181,7 +1195,7 @@ function convertToOpenRouterChatMessages(prompt) {
       const messageCacheControl = getCacheControl(providerOptions);
       const contentParts = content.map(
         (part) => {
-          var _a16, _b2, _c2, _d2, _e2, _f2,
+          var _a16, _b2, _c2, _d2, _e2, _f2, _g2;
           const cacheControl = (_a16 = getCacheControl(part.providerOptions)) != null ? _a16 : messageCacheControl;
           switch (part.type) {
             case "text":
@@ -1214,7 +1228,7 @@ function convertToOpenRouterChatMessages(prompt) {
               };
             }
             const fileName = String(
-              (
+              (_g2 = (_f2 = (_e2 = (_d2 = part.providerOptions) == null ? void 0 : _d2.openrouter) == null ? void 0 : _e2.filename) != null ? _f2 : part.filename) != null ? _g2 : ""
             );
             const fileData = getFileUrl({
               part,
@@ -1261,6 +1275,7 @@ function convertToOpenRouterChatMessages(prompt) {
         let text = "";
         let reasoning = "";
         const toolCalls = [];
+        const accumulatedReasoningDetails = [];
         for (const part of content) {
           switch (part.type) {
             case "text": {
@@ -1286,6 +1301,12 @@ function convertToOpenRouterChatMessages(prompt) {
             }
             case "reasoning": {
               reasoning += part.text;
+              const parsedPartProviderOptions = OpenRouterProviderOptionsSchema.safeParse(part.providerOptions);
+              if (parsedPartProviderOptions.success && ((_e = (_d = parsedPartProviderOptions.data) == null ? void 0 : _d.openrouter) == null ? void 0 : _e.reasoning_details)) {
+                accumulatedReasoningDetails.push(
+                  ...parsedPartProviderOptions.data.openrouter.reasoning_details
+                );
+              }
               break;
             }
             case "file":
@@ -1296,7 +1317,8 @@ function convertToOpenRouterChatMessages(prompt) {
           }
         }
         const parsedProviderOptions = OpenRouterProviderOptionsSchema.safeParse(providerOptions);
-        const messageReasoningDetails = parsedProviderOptions.success ? (
+        const messageReasoningDetails = parsedProviderOptions.success ? (_g = (_f = parsedProviderOptions.data) == null ? void 0 : _f.openrouter) == null ? void 0 : _g.reasoning_details : void 0;
+        const messageAnnotations = parsedProviderOptions.success ? (_i = (_h = parsedProviderOptions.data) == null ? void 0 : _h.openrouter) == null ? void 0 : _i.annotations : void 0;
         const finalReasoningDetails = messageReasoningDetails && Array.isArray(messageReasoningDetails) && messageReasoningDetails.length > 0 ? messageReasoningDetails : accumulatedReasoningDetails.length > 0 ? accumulatedReasoningDetails : void 0;
         messages.push({
           role: "assistant",
@@ -1304,6 +1326,7 @@ function convertToOpenRouterChatMessages(prompt) {
           tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
           reasoning: reasoning || void 0,
           reasoning_details: finalReasoningDetails,
+          annotations: messageAnnotations,
           cache_control: getCacheControl(providerOptions)
         });
         break;
@@ -1315,7 +1338,7 @@ function convertToOpenRouterChatMessages(prompt) {
           role: "tool",
           tool_call_id: toolResponse.toolCallId,
           content: content2,
-          cache_control: (
+          cache_control: (_j = getCacheControl(providerOptions)) != null ? _j : getCacheControl(toolResponse.providerOptions)
         });
       }
       break;
@@ -1574,6 +1597,7 @@ var OpenRouterChatLanguageModel = class {
     this.specificationVersion = "v2";
     this.provider = "openrouter";
    this.defaultObjectGenerationMode = "tool";
+    this.supportsImageUrls = true;
     this.supportedUrls = {
       "image/*": [
         /^data:image\/[a-zA-Z]+;base64,/,
@@ -1631,7 +1655,9 @@ var OpenRouterChatLanguageModel = class {
       plugins: this.settings.plugins,
       web_search_options: this.settings.web_search_options,
       // Provider routing settings:
-      provider: this.settings.provider
+      provider: this.settings.provider,
+      // Debug settings:
+      debug: this.settings.debug
     }, this.config.extraBody), this.settings.extraBody);
     if ((responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null) {
       return __spreadProps(__spreadValues({}, baseArgs), {
@@ -1666,7 +1692,7 @@ var OpenRouterChatLanguageModel = class {
     return baseArgs;
   }
   async doGenerate(options) {
-    var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w
+    var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
     const providerOptions = options.providerOptions || {};
     const openrouterOptions = providerOptions.openrouter || {};
     const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
@@ -1824,6 +1850,9 @@ var OpenRouterChatLanguageModel = class {
         }
       }
     }
+    const fileAnnotations = (_k = choice.message.annotations) == null ? void 0 : _k.filter(
+      (a) => a.type === "file"
+    );
     return {
       content,
       finishReason: mapOpenRouterFinishReason(choice.finish_reason),
@@ -1831,23 +1860,27 @@ var OpenRouterChatLanguageModel = class {
       warnings: [],
       providerMetadata: {
         openrouter: OpenRouterProviderMetadataSchema.parse({
-          provider: (
-          reasoning_details: (
-
-
-
-
-
+          provider: (_l = response.provider) != null ? _l : "",
+          reasoning_details: (_m = choice.message.reasoning_details) != null ? _m : [],
+          annotations: fileAnnotations && fileAnnotations.length > 0 ? fileAnnotations : void 0,
+          usage: __spreadValues(__spreadValues(__spreadValues({
+            promptTokens: (_n = usageInfo.inputTokens) != null ? _n : 0,
+            completionTokens: (_o = usageInfo.outputTokens) != null ? _o : 0,
+            totalTokens: (_p = usageInfo.totalTokens) != null ? _p : 0,
+            cost: (_q = response.usage) == null ? void 0 : _q.cost
+          }, ((_s = (_r = response.usage) == null ? void 0 : _r.prompt_tokens_details) == null ? void 0 : _s.cached_tokens) != null ? {
             promptTokensDetails: {
-              cachedTokens:
-            }
+              cachedTokens: response.usage.prompt_tokens_details.cached_tokens
+            }
+          } : {}), ((_u = (_t = response.usage) == null ? void 0 : _t.completion_tokens_details) == null ? void 0 : _u.reasoning_tokens) != null ? {
             completionTokensDetails: {
-              reasoningTokens:
-            }
+              reasoningTokens: response.usage.completion_tokens_details.reasoning_tokens
+            }
+          } : {}), ((_w = (_v = response.usage) == null ? void 0 : _v.cost_details) == null ? void 0 : _w.upstream_inference_cost) != null ? {
             costDetails: {
-              upstreamInferenceCost:
+              upstreamInferenceCost: response.usage.cost_details.upstream_inference_cost
             }
-            }
+          } : {})
         })
       },
       request: { body: args },
@@ -1904,7 +1937,7 @@ var OpenRouterChatLanguageModel = class {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
+            var _a16, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -1954,6 +1987,12 @@ var OpenRouterChatLanguageModel = class {
             }
             openrouterUsage.cost = value.usage.cost;
             openrouterUsage.totalTokens = value.usage.total_tokens;
+            const upstreamInferenceCost = (_c = value.usage.cost_details) == null ? void 0 : _c.upstream_inference_cost;
+            if (upstreamInferenceCost != null) {
+              openrouterUsage.costDetails = {
+                upstreamInferenceCost
+              };
+            }
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -1963,16 +2002,18 @@ var OpenRouterChatLanguageModel = class {
             return;
           }
           const delta = choice.delta;
-          const emitReasoningChunk = (chunkText) => {
+          const emitReasoningChunk = (chunkText, providerMetadata) => {
             if (!reasoningStarted) {
               reasoningId = openrouterResponseId || generateId();
               controller.enqueue({
+                providerMetadata,
                 type: "reasoning-start",
                 id: reasoningId
               });
               reasoningStarted = true;
             }
             controller.enqueue({
+              providerMetadata,
               type: "reasoning-delta",
               delta: chunkText,
               id: reasoningId || generateId()
@@ -1993,23 +2034,28 @@ var OpenRouterChatLanguageModel = class {
                 accumulatedReasoningDetails.push(detail);
               }
             }
+            const reasoningMetadata = {
+              openrouter: {
+                reasoning_details: delta.reasoning_details
+              }
+            };
             for (const detail of delta.reasoning_details) {
               switch (detail.type) {
                 case "reasoning.text" /* Text */: {
                   if (detail.text) {
-                    emitReasoningChunk(detail.text);
+                    emitReasoningChunk(detail.text, reasoningMetadata);
                   }
                   break;
                 }
                 case "reasoning.encrypted" /* Encrypted */: {
                   if (detail.data) {
-                    emitReasoningChunk("[REDACTED]");
+                    emitReasoningChunk("[REDACTED]", reasoningMetadata);
                   }
                   break;
                 }
                 case "reasoning.summary" /* Summary */: {
                   if (detail.summary) {
-                    emitReasoningChunk(detail.summary);
+                    emitReasoningChunk(detail.summary, reasoningMetadata);
                   }
                   break;
                 }
@@ -2064,7 +2110,7 @@ var OpenRouterChatLanguageModel = class {
             }
             if (delta.tool_calls != null) {
               for (const toolCallDelta of delta.tool_calls) {
-                const index = (
+                const index = (_d = toolCallDelta.index) != null ? _d : toolCalls.length - 1;
                 if (toolCalls[index] == null) {
                   if (toolCallDelta.type !== "function") {
                     throw new InvalidResponseDataError({
@@ -2078,7 +2124,7 @@ var OpenRouterChatLanguageModel = class {
                       message: `Expected 'id' to be a string.`
                     });
                   }
-                  if (((
+                  if (((_e = toolCallDelta.function) == null ? void 0 : _e.name) == null) {
                     throw new InvalidResponseDataError({
                       data: toolCallDelta,
                       message: `Expected 'function.name' to be a string.`
@@ -2089,7 +2135,7 @@ var OpenRouterChatLanguageModel = class {
                     type: "function",
                     function: {
                       name: toolCallDelta.function.name,
-                      arguments: (
+                      arguments: (_f = toolCallDelta.function.arguments) != null ? _f : ""
                     },
                     inputStarted: false,
                     sent: false
@@ -2101,7 +2147,7 @@ var OpenRouterChatLanguageModel = class {
                     message: `Tool call at index ${index} is missing after creation.`
                   });
                 }
-                if (((
+                if (((_g = toolCall2.function) == null ? void 0 : _g.name) != null && ((_h = toolCall2.function) == null ? void 0 : _h.arguments) != null && isParsableJson(toolCall2.function.arguments)) {
                   toolCall2.inputStarted = true;
                   controller.enqueue({
                     type: "tool-input-start",
@@ -2151,18 +2197,18 @@ var OpenRouterChatLanguageModel = class {
                     toolName: toolCall.function.name
                   });
                 }
-                if (((
-                  toolCall.function.arguments += (
+                if (((_i = toolCallDelta.function) == null ? void 0 : _i.arguments) != null) {
+                  toolCall.function.arguments += (_k = (_j = toolCallDelta.function) == null ? void 0 : _j.arguments) != null ? _k : "";
                 }
                 controller.enqueue({
                   type: "tool-input-delta",
                   id: toolCall.id,
-                  delta: (
+                  delta: (_l = toolCallDelta.function.arguments) != null ? _l : ""
                 });
-                if (((
+                if (((_m = toolCall.function) == null ? void 0 : _m.name) != null && ((_n = toolCall.function) == null ? void 0 : _n.arguments) != null && isParsableJson(toolCall.function.arguments)) {
                   controller.enqueue({
                     type: "tool-call",
-                    toolCallId: (
+                    toolCallId: (_o = toolCall.id) != null ? _o : generateId(),
                     toolName: toolCall.function.name,
                     input: toolCall.function.arguments,
                     providerMetadata: {
@@ -2378,7 +2424,10 @@ var OpenRouterCompletionChunkSchema = import_v47.z.union([
         reasoning_tokens: import_v47.z.number()
       }).passthrough().nullish(),
       total_tokens: import_v47.z.number(),
-      cost: import_v47.z.number().optional()
+      cost: import_v47.z.number().optional(),
+      cost_details: import_v47.z.object({
+        upstream_inference_cost: import_v47.z.number().nullish()
+      }).passthrough().nullish()
     }).passthrough().nullish()
   }).passthrough(),
   OpenRouterErrorResponseSchema
@@ -2389,6 +2438,7 @@ var OpenRouterCompletionLanguageModel = class {
   constructor(modelId, settings, config) {
     this.specificationVersion = "v2";
     this.provider = "openrouter";
+    this.supportsImageUrls = true;
     this.supportedUrls = {
       "image/*": [
        /^data:image\/[a-zA-Z]+;base64,/,
@@ -2551,7 +2601,7 @@ var OpenRouterCompletionLanguageModel = class {
       stream: response.pipeThrough(
         new TransformStream({
           transform(chunk, controller) {
-            var _a15, _b;
+            var _a15, _b, _c;
             if (!chunk.success) {
               finishReason = "error";
               controller.enqueue({ type: "error", error: chunk.error });
@@ -2585,6 +2635,12 @@ var OpenRouterCompletionLanguageModel = class {
             }
             openrouterUsage.cost = value.usage.cost;
             openrouterUsage.totalTokens = value.usage.total_tokens;
+            const upstreamInferenceCost = (_c = value.usage.cost_details) == null ? void 0 : _c.upstream_inference_cost;
+            if (upstreamInferenceCost != null) {
+              openrouterUsage.costDetails = {
+                upstreamInferenceCost
+              };
+            }
           }
           const choice = value.choices[0];
           if ((choice == null ? void 0 : choice.finish_reason) != null) {
@@ -2680,7 +2736,7 @@ function withUserAgentSuffix(headers, ...userAgentSuffixParts) {
 }
 
 // src/version.ts
-var VERSION = false ? "0.0.0-test" : "1.2.8";
+var VERSION = false ? "0.0.0-test" : "1.4.0";
 
 // src/provider.ts
 function createOpenRouter(options = {}) {