@openrouter/ai-sdk-provider 1.5.3 → 6.0.0-alpha.0

This diff shows the changes between package versions as they were published to a supported public registry. It is provided for informational purposes only and reflects the publicly available contents of each released version.
package/dist/index.d.mts CHANGED
@@ -1,634 +1,390 @@
- import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Content, LanguageModelV2FinishReason, LanguageModelV2Usage, LanguageModelV2CallWarning, LanguageModelV2ResponseMetadata, SharedV2Headers, LanguageModelV2StreamPart, EmbeddingModelV2, SharedV2ProviderMetadata, ProviderV2 } from '@ai-sdk/provider';
- export { LanguageModelV2, LanguageModelV2Prompt } from '@ai-sdk/provider';
- import * as models from '@openrouter/sdk/models';
- import { z } from 'zod/v4';
- import { EncodeOptions, DecodeOptions, JsonValue } from '@toon-format/toon';
- export { DecodeOptions, EncodeOptions, JsonValue } from '@toon-format/toon';
+ import { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3GenerateResult, LanguageModelV3StreamResult, EmbeddingModelV3, EmbeddingModelV3CallOptions, EmbeddingModelV3Result, ImageModelV3, ImageModelV3CallOptions, SharedV3Warning, ProviderV3 } from '@ai-sdk/provider';

- type OpenRouterChatModelId = string;
- type OpenRouterChatSettings = {
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to understand better how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- Whether to enable parallel function calling during tool use. Default to true.
- */
- parallelToolCalls?: boolean;
- /**
- A unique identifier representing your end-user, which can help OpenRouter to
- monitor and detect abuse. Learn more.
- */
- user?: string;
- /**
- * Plugin configurations for enabling various capabilities
- */
- plugins?: Array<{
- id: models.IdWeb;
- max_results?: number;
- search_prompt?: string;
- engine?: models.Engine;
- } | {
- id: models.IdFileParser;
- max_files?: number;
- pdf?: {
- engine?: models.PdfEngine;
- };
- } | {
- id: models.IdModeration;
- }>;
+ /**
+ * OpenRouter chat language model implementing AI SDK V3 LanguageModelV3 interface.
+ *
+ * Uses the OpenRouter Responses API for both streaming and non-streaming requests.
+ */
+ declare class OpenRouterChatLanguageModel implements LanguageModelV3 {
+ readonly specificationVersion: "v3";
+ readonly provider = "openrouter";
+ readonly modelId: string;
+ private readonly settings;
  /**
- * Built-in web search options for models that support native web search
+ * Supported URL patterns by media type.
+ * OpenRouter Chat API only supports image URLs natively.
+ * PDF URLs are not supported - use PDF data URIs or the Responses API instead.
  */
- web_search_options?: {
- /**
- * Maximum number of search results to include
- */
- max_results?: number;
- /**
- * Custom search prompt to guide the search query
- */
- search_prompt?: string;
- /**
- * Search engine to use for web search
- * - "native": Use provider's built-in web search
- * - "exa": Use Exa's search API
- * - undefined: Native if supported, otherwise Exa
- * @see https://openrouter.ai/docs/features/web-search
- */
- engine?: models.Engine;
- };
+ readonly supportedUrls: Record<string, RegExp[]>;
+ constructor(modelId: string, settings: OpenRouterModelSettings);
+ doGenerate(options: LanguageModelV3CallOptions): Promise<LanguageModelV3GenerateResult>;
+ doStream(options: LanguageModelV3CallOptions): Promise<LanguageModelV3StreamResult>;
+ }
+
+ /**
+ * OpenRouter embedding model implementing AI SDK V3 EmbeddingModelV3 interface.
+ */
+ declare class OpenRouterEmbeddingModel implements EmbeddingModelV3 {
+ readonly specificationVersion: "v3";
+ readonly provider = "openrouter";
+ readonly modelId: string;
+ private readonly settings;
  /**
- * Debug options for troubleshooting API requests.
- * Only works with streaming requests.
- * @see https://openrouter.ai/docs/api-reference/debugging
+ * Maximum number of embeddings that can be generated in a single API call.
+ * Set to 2048 as a reasonable default for most embedding models.
  */
- debug?: {
- /**
- * When true, echoes back the request body that was sent to the upstream provider.
- * The debug data will be returned as the first chunk in the stream with a `debug.echo_upstream_body` field.
- * Sensitive data like user IDs and base64 content will be redacted.
- */
- echo_upstream_body?: boolean;
- };
+ readonly maxEmbeddingsPerCall = 2048;
  /**
- * Provider routing preferences to control request routing behavior
+ * Whether the model supports parallel calls.
  */
- provider?: {
- /**
- * List of provider slugs to try in order (e.g. ["anthropic", "openai"])
- */
- order?: string[];
- /**
- * Whether to allow backup providers when primary is unavailable (default: true)
- */
- allow_fallbacks?: boolean;
- /**
- * Only use providers that support all parameters in your request (default: false)
- */
- require_parameters?: boolean;
- /**
- * Control whether to use providers that may store data
- */
- data_collection?: models.DataCollection;
- /**
- * List of provider slugs to allow for this request
- */
- only?: string[];
- /**
- * List of provider slugs to skip for this request
- */
- ignore?: string[];
- /**
- * List of quantization levels to filter by (e.g. ["int4", "int8"])
- */
- quantizations?: Array<models.Quantization>;
- /**
- * Sort providers by price, throughput, or latency
- */
- sort?: models.ProviderSort;
- /**
- * Maximum pricing you want to pay for this request
- */
- max_price?: {
- prompt?: number | string;
- completion?: number | string;
- image?: number | string;
- audio?: number | string;
- request?: number | string;
+ readonly supportsParallelCalls = true;
+ constructor(modelId: string, settings: OpenRouterModelSettings);
+ doEmbed(options: EmbeddingModelV3CallOptions): Promise<EmbeddingModelV3Result>;
+ }
+
+ /**
+ * OpenRouter image model implementing AI SDK V3 ImageModelV3 interface.
+ *
+ * Note: Image generation is Tier 3 functionality. The doGenerate method
+ * throws an error with guidance on tracking progress.
+ */
+ declare class OpenRouterImageModel implements ImageModelV3 {
+ readonly specificationVersion: "v3";
+ readonly provider = "openrouter";
+ readonly modelId: string;
+ /**
+ * Maximum number of images that can be generated in a single API call.
+ */
+ readonly maxImagesPerCall = 1;
+ constructor(modelId: string, _settings: unknown);
+ doGenerate(_options: ImageModelV3CallOptions): Promise<{
+ images: Array<string> | Array<Uint8Array>;
+ warnings: Array<SharedV3Warning>;
+ response: {
+ timestamp: Date;
+ modelId: string;
+ headers: Record<string, string> | undefined;
  };
- /**
- * Whether to restrict routing to only ZDR (Zero Data Retention) endpoints.
- * When true, only endpoints that do not retain prompts will be used.
- */
- zdr?: boolean;
- };
- } & OpenRouterSharedSettings;
+ }>;
+ }

- type OpenRouterEmbeddingModelId = string;
- type OpenRouterEmbeddingSettings = {
+ /**
+ * Settings for configuring an OpenRouter provider instance.
+ *
+ * @description
+ * Configuration options passed to `createOpenRouter()` to customize the provider behavior.
+ * All settings are optional - the provider will use sensible defaults and environment
+ * variables when settings are not explicitly provided.
+ *
+ * @example
+ * ```ts
+ * import { createOpenRouter } from '@openrouter/ai-sdk-provider';
+ *
+ * const openrouter = createOpenRouter({
+ * apiKey: process.env.OPENROUTER_API_KEY,
+ * baseURL: 'https://openrouter.ai/api/v1',
+ * headers: {
+ * 'X-Title': 'My App',
+ * 'HTTP-Referer': 'https://myapp.com',
+ * },
+ * });
+ * ```
+ */
+ interface OpenRouterProviderSettings {
  /**
- * A unique identifier representing your end-user, which can help OpenRouter to
- * monitor and detect abuse.
+ * API key for OpenRouter. If not provided, will use OPENROUTER_API_KEY env var.
  */
- user?: string;
+ apiKey?: string;
  /**
- * Provider routing preferences to control request routing behavior
+ * Base URL for the OpenRouter API.
+ * @default 'https://openrouter.ai/api/v1'
  */
- provider?: {
- /**
- * List of provider slugs to try in order (e.g. ["openai", "voyageai"])
- */
- order?: string[];
- /**
- * Whether to allow backup providers when primary is unavailable (default: true)
- */
- allow_fallbacks?: boolean;
- /**
- * Only use providers that support all parameters in your request (default: false)
- */
- require_parameters?: boolean;
- /**
- * Control whether to use providers that may store data
- */
- data_collection?: 'allow' | 'deny';
- /**
- * List of provider slugs to allow for this request
- */
- only?: string[];
- /**
- * List of provider slugs to skip for this request
- */
- ignore?: string[];
- /**
- * Sort providers by price, throughput, or latency
- */
- sort?: 'price' | 'throughput' | 'latency';
- /**
- * Maximum pricing you want to pay for this request
- */
- max_price?: {
- prompt?: number | string;
- completion?: number | string;
- image?: number | string;
- audio?: number | string;
- request?: number | string;
- };
- };
- } & OpenRouterSharedSettings;
-
- type OpenRouterProviderOptions = {
- models?: string[];
+ baseURL?: string;
  /**
- * https://openrouter.ai/docs/use-cases/reasoning-tokens
- * One of `max_tokens` or `effort` is required.
- * If `exclude` is true, reasoning will be removed from the response. Default is false.
+ * Base URL for the OpenRouter API (alias for baseURL).
+ * @default 'https://openrouter.ai/api/v1'
+ * @deprecated Use baseURL instead.
  */
- reasoning?: {
- enabled?: boolean;
- exclude?: boolean;
- } & ({
- max_tokens: number;
- } | {
- effort: 'high' | 'medium' | 'low';
- });
+ baseUrl?: string;
  /**
- * A unique identifier representing your end-user, which can
- * help OpenRouter to monitor and detect abuse.
+ * Custom headers to include in all requests.
  */
- user?: string;
- };
- type OpenRouterSharedSettings = OpenRouterProviderOptions & {
+ headers?: Record<string, string>;
  /**
- * @deprecated use `reasoning` instead
+ * Custom fetch implementation.
  */
- includeReasoning?: boolean;
- extraBody?: Record<string, unknown>;
+ fetch?: typeof globalThis.fetch;
  /**
- * Enable usage accounting to get detailed token usage information.
- * https://openrouter.ai/docs/use-cases/usage-accounting
+ * Extra body parameters to include in all requests.
  */
- usage?: {
- /**
- * When true, includes token usage information in the response.
- */
- include: boolean;
- };
- };
- /**
- * Usage accounting response
- * @see https://openrouter.ai/docs/use-cases/usage-accounting
- */
- type OpenRouterUsageAccounting = {
- promptTokens: number;
- promptTokensDetails?: {
- cachedTokens: number;
- };
- completionTokens: number;
- completionTokensDetails?: {
- reasoningTokens: number;
- };
- totalTokens: number;
- cost?: number;
- costDetails?: {
- upstreamInferenceCost: number;
- };
- };
-
- type OpenRouterCompletionModelId = string;
- type OpenRouterCompletionSettings = {
- /**
- Modify the likelihood of specified tokens appearing in the completion.
-
- Accepts a JSON object that maps tokens (specified by their token ID in
- the GPT tokenizer) to an associated bias value from -100 to 100. You
- can use this tokenizer tool to convert text to token IDs. Mathematically,
- the bias is added to the logits generated by the model prior to sampling.
- The exact effect will vary per model, but values between -1 and 1 should
- decrease or increase likelihood of selection; values like -100 or 100
- should result in a ban or exclusive selection of the relevant token.
-
- As an example, you can pass {"50256": -100} to prevent the <|endoftext|>
- token from being generated.
- */
- logitBias?: Record<number, number>;
- /**
- Return the log probabilities of the tokens. Including logprobs will increase
- the response size and can slow down response times. However, it can
- be useful to better understand how the model is behaving.
-
- Setting to true will return the log probabilities of the tokens that
- were generated.
-
- Setting to a number will return the log probabilities of the top n
- tokens that were generated.
- */
- logprobs?: boolean | number;
- /**
- The suffix that comes after a completion of inserted text.
- */
- suffix?: string;
- } & OpenRouterSharedSettings;
-
- declare enum ReasoningFormat {
- Unknown = "unknown",
- OpenAIResponsesV1 = "openai-responses-v1",
- XAIResponsesV1 = "xai-responses-v1",
- AnthropicClaudeV1 = "anthropic-claude-v1",
- GoogleGeminiV1 = "google-gemini-v1"
- }
-
- declare enum ReasoningDetailType {
- Summary = "reasoning.summary",
- Encrypted = "reasoning.encrypted",
- Text = "reasoning.text"
- }
- declare const ReasoningDetailUnionSchema: z.ZodUnion<readonly [z.ZodObject<{
- type: z.ZodLiteral<ReasoningDetailType.Summary>;
- summary: z.ZodString;
- id: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- format: z.ZodOptional<z.ZodNullable<z.ZodEnum<typeof ReasoningFormat>>>;
- index: z.ZodOptional<z.ZodNumber>;
- }, z.core.$strip>, z.ZodObject<{
- type: z.ZodLiteral<ReasoningDetailType.Encrypted>;
- data: z.ZodString;
- id: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- format: z.ZodOptional<z.ZodNullable<z.ZodEnum<typeof ReasoningFormat>>>;
- index: z.ZodOptional<z.ZodNumber>;
- }, z.core.$strip>, z.ZodObject<{
- type: z.ZodLiteral<ReasoningDetailType.Text>;
- text: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- signature: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- id: z.ZodOptional<z.ZodNullable<z.ZodString>>;
- format: z.ZodOptional<z.ZodNullable<z.ZodEnum<typeof ReasoningFormat>>>;
- index: z.ZodOptional<z.ZodNumber>;
- }, z.core.$strip>]>;
- type ReasoningDetailUnion = z.infer<typeof ReasoningDetailUnionSchema>;
-
- type OpenRouterChatConfig = {
- provider: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
- url: (options: {
- modelId: string;
- path: string;
- }) => string;
- fetch?: typeof fetch;
  extraBody?: Record<string, unknown>;
- };
- declare class OpenRouterChatLanguageModel implements LanguageModelV2 {
- readonly specificationVersion: "v2";
- readonly provider = "openrouter";
- readonly defaultObjectGenerationMode: "tool";
- readonly modelId: OpenRouterChatModelId;
- readonly supportsImageUrls = true;
- readonly supportedUrls: Record<string, RegExp[]>;
- readonly settings: OpenRouterChatSettings;
- private readonly config;
- constructor(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings, config: OpenRouterChatConfig);
- private getArgs;
- doGenerate(options: LanguageModelV2CallOptions): Promise<{
- content: Array<LanguageModelV2Content>;
- finishReason: LanguageModelV2FinishReason;
- usage: LanguageModelV2Usage;
- warnings: Array<LanguageModelV2CallWarning>;
- providerMetadata?: {
- openrouter: {
- provider: string;
- reasoning_details?: ReasoningDetailUnion[];
- usage: OpenRouterUsageAccounting;
- };
- };
- request?: {
- body?: unknown;
- };
- response?: LanguageModelV2ResponseMetadata & {
- headers?: SharedV2Headers;
- body?: unknown;
- };
- }>;
- doStream(options: LanguageModelV2CallOptions): Promise<{
- stream: ReadableStream<LanguageModelV2StreamPart>;
- warnings: Array<LanguageModelV2CallWarning>;
- request?: {
- body?: unknown;
- };
- response?: LanguageModelV2ResponseMetadata & {
- headers?: SharedV2Headers;
- body?: unknown;
- };
- }>;
- }
-
- type OpenRouterCompletionConfig = {
- provider: string;
- compatibility: 'strict' | 'compatible';
- headers: () => Record<string, string | undefined>;
- url: (options: {
- modelId: string;
- path: string;
- }) => string;
- fetch?: typeof fetch;
- extraBody?: Record<string, unknown>;
- };
- declare class OpenRouterCompletionLanguageModel implements LanguageModelV2 {
- readonly specificationVersion: "v2";
- readonly provider = "openrouter";
- readonly modelId: OpenRouterCompletionModelId;
- readonly supportsImageUrls = true;
- readonly supportedUrls: Record<string, RegExp[]>;
- readonly defaultObjectGenerationMode: undefined;
- readonly settings: OpenRouterCompletionSettings;
- private readonly config;
- constructor(modelId: OpenRouterCompletionModelId, settings: OpenRouterCompletionSettings, config: OpenRouterCompletionConfig);
- private getArgs;
- doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
- doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
  }
-
- type OpenRouterEmbeddingConfig = {
- provider: string;
- headers: () => Record<string, string | undefined>;
- url: (options: {
- modelId: string;
- path: string;
- }) => string;
- fetch?: typeof fetch;
- extraBody?: Record<string, unknown>;
- };
- declare class OpenRouterEmbeddingModel implements EmbeddingModelV2<string> {
- readonly specificationVersion: "v2";
- readonly provider = "openrouter";
- readonly modelId: OpenRouterEmbeddingModelId;
- readonly settings: OpenRouterEmbeddingSettings;
- readonly maxEmbeddingsPerCall: undefined;
- readonly supportsParallelCalls = true;
- private readonly config;
- constructor(modelId: OpenRouterEmbeddingModelId, settings: OpenRouterEmbeddingSettings, config: OpenRouterEmbeddingConfig);
- doEmbed(options: {
- values: Array<string>;
- abortSignal?: AbortSignal;
- headers?: Record<string, string | undefined>;
- }): Promise<{
- embeddings: Array<Array<number>>;
- usage?: {
- tokens: number;
- };
- providerMetadata?: SharedV2ProviderMetadata;
- response?: {
- headers?: SharedV2Headers;
- body?: unknown;
- };
- }>;
- }
-
- interface OpenRouterProvider extends ProviderV2 {
- (modelId: OpenRouterChatModelId, settings?: OpenRouterCompletionSettings): OpenRouterCompletionLanguageModel;
- (modelId: OpenRouterChatModelId, settings?: OpenRouterChatSettings): OpenRouterChatLanguageModel;
- languageModel(modelId: OpenRouterChatModelId, settings?: OpenRouterCompletionSettings): OpenRouterCompletionLanguageModel;
- languageModel(modelId: OpenRouterChatModelId, settings?: OpenRouterChatSettings): OpenRouterChatLanguageModel;
+ /**
+ * Configuration for an OpenRouter plugin.
+ *
+ * @description
+ * Plugins extend OpenRouter functionality with features like web search, code execution,
+ * and more. Each plugin has a unique identifier and optional configuration parameters.
+ * Additional plugin-specific properties can be included as needed.
+ *
+ * @example
+ * ```ts
+ * const model = openrouter('anthropic/claude-3.5-sonnet', {
+ * plugins: [
+ * { id: 'web-search' },
+ * { id: 'code-interpreter', config: { timeout: 30000 } },
+ * ],
+ * });
+ * ```
+ */
+ interface OpenRouterPluginConfig {
  /**
- Creates an OpenRouter chat model for text generation.
+ * The plugin identifier.
  */
- chat(modelId: OpenRouterChatModelId, settings?: OpenRouterChatSettings): OpenRouterChatLanguageModel;
+ id: string;
  /**
- Creates an OpenRouter completion model for text generation.
+ * Plugin-specific configuration.
  */
- completion(modelId: OpenRouterCompletionModelId, settings?: OpenRouterCompletionSettings): OpenRouterCompletionLanguageModel;
+ config?: Record<string, unknown>;
  /**
- Creates an OpenRouter text embedding model. (AI SDK v5)
+ * Allow any additional plugin-specific properties.
  */
- textEmbeddingModel(modelId: OpenRouterEmbeddingModelId, settings?: OpenRouterEmbeddingSettings): OpenRouterEmbeddingModel;
+ [key: string]: unknown;
+ }
+ /**
+ * Configuration for OpenRouter's provider routing behavior.
+ *
+ * @description
+ * Controls how OpenRouter selects and falls back between different AI providers
+ * when routing requests. Use this to specify provider preferences, enable/disable
+ * fallbacks, and require specific provider parameters.
+ *
+ * @example
+ * ```ts
+ * const model = openrouter('openai/gpt-4', {
+ * provider: {
+ * order: ['Azure', 'OpenAI'],
+ * allowFallbacks: true,
+ * },
+ * });
+ * ```
+ */
+ interface OpenRouterProviderRoutingConfig {
+ /**
+ * Provider order preference.
+ */
+ order?: string[];
  /**
- Creates an OpenRouter text embedding model. (AI SDK v4 - deprecated, use textEmbeddingModel instead)
- @deprecated Use textEmbeddingModel instead
+ * Allow fallbacks to other providers.
  */
- embedding(modelId: OpenRouterEmbeddingModelId, settings?: OpenRouterEmbeddingSettings): OpenRouterEmbeddingModel;
+ allowFallbacks?: boolean;
+ /**
+ * Required provider parameters.
+ */
+ requireParameters?: boolean;
  }
- interface OpenRouterProviderSettings {
+ /**
+ * Model-specific options for OpenRouter requests.
+ *
+ * @description
+ * Options that can be passed when creating a model to customize its behavior.
+ * These include OpenRouter-specific features like plugins, transforms, model
+ * fallbacks, and routing configuration. Additional properties are passed through
+ * to the underlying API.
+ *
+ * @example
+ * ```ts
+ * const model = openrouter('anthropic/claude-3.5-sonnet', {
+ * usage: { include: true },
+ * transforms: ['middle-out'],
+ * models: ['anthropic/claude-3-opus', 'openai/gpt-4'], // fallbacks
+ * provider: {
+ * order: ['Anthropic'],
+ * allowFallbacks: false,
+ * },
+ * });
+ * ```
+ */
+ interface OpenRouterModelOptions {
  /**
- Base URL for the OpenRouter API calls.
- */
- baseURL?: string;
+ * Usage accounting configuration.
+ */
+ usage?: {
+ /**
+ * Whether to include usage information in the response.
+ */
+ include?: boolean;
+ };
  /**
- @deprecated Use `baseURL` instead.
- */
- baseUrl?: string;
+ * OpenRouter plugins to enable.
+ */
+ plugins?: OpenRouterPluginConfig[];
  /**
- API key for authenticating requests.
- */
- apiKey?: string;
+ * Message transforms to apply.
+ */
+ transforms?: string[];
  /**
- Custom headers to include in the requests.
- */
- headers?: Record<string, string>;
+ * Fallback model IDs.
+ */
+ models?: string[];
  /**
- OpenRouter compatibility mode. Should be set to `strict` when using the OpenRouter API,
- and `compatible` when using 3rd party providers. In `compatible` mode, newer
- information such as streamOptions are not being sent. Defaults to 'compatible'.
+ * Routing strategy.
  */
- compatibility?: 'strict' | 'compatible';
+ route?: string;
  /**
- Custom fetch implementation. You can use it as a middleware to intercept requests,
- or to provide a custom fetch implementation for e.g. testing.
- */
- fetch?: typeof fetch;
+ * Provider routing configuration.
+ */
+ provider?: OpenRouterProviderRoutingConfig;
  /**
- A JSON object to send as the request body to access OpenRouter features & upstream provider features.
- */
- extraBody?: Record<string, unknown>;
+ * How to handle system messages for reasoning models.
+ * - 'system': Standard system message (default)
+ * - 'developer': Convert system to developer role for reasoning models
+ * - 'remove': Strip system messages entirely
+ */
+ systemMessageMode?: 'system' | 'developer' | 'remove';
  /**
- * Record of provider slugs to API keys for injecting into provider routing.
- * Maps provider slugs (e.g. "anthropic", "openai") to their respective API keys.
+ * Allow any additional model-specific options.
+ * These are passed through to the API.
  */
- api_keys?: Record<string, string>;
+ [key: string]: unknown;
  }
- /**
- Create an OpenRouter provider instance.
- */
- declare function createOpenRouter(options?: OpenRouterProviderSettings): OpenRouterProvider;
- /**
- Default OpenRouter provider instance. It uses 'strict' compatibility mode.
- */
- declare const openrouter: OpenRouterProvider;

  /**
- @deprecated Use `createOpenRouter` instead.
+ * OpenRouter provider interface extending the AI SDK V3 ProviderV3 interface.
+ *
+ * The provider is callable - calling it directly is equivalent to calling languageModel().
  */
- declare class OpenRouter {
+ interface OpenRouterProvider extends ProviderV3 {
  /**
- Use a different URL prefix for API calls, e.g. to use proxy servers.
- The default prefix is `https://openrouter.ai/api/v1`.
+ * Create a language model by calling the provider directly.
  */
- readonly baseURL: string;
+ (modelId: string, settings?: OpenRouterModelOptions): OpenRouterChatLanguageModel;
  /**
- API key that is being sent using the `Authorization` header.
- It defaults to the `OPENROUTER_API_KEY` environment variable.
- */
- readonly apiKey?: string;
+ * Create a language model.
+ */
+ languageModel(modelId: string, settings?: OpenRouterModelOptions): OpenRouterChatLanguageModel;
+ /**
+ * Create a chat model (alias for languageModel).
+ */
+ chat(modelId: string, settings?: OpenRouterModelOptions): OpenRouterChatLanguageModel;
  /**
- Custom headers to include in the requests.
+ * Create an embedding model.
  */
- readonly headers?: Record<string, string>;
+ embeddingModel(modelId: string, settings?: OpenRouterModelOptions): OpenRouterEmbeddingModel;
  /**
- * Record of provider slugs to API keys for injecting into provider routing.
+ * Create a text embedding model.
+ * @deprecated Use embeddingModel instead.
  */
- readonly api_keys?: Record<string, string>;
+ textEmbeddingModel(modelId: string, settings?: OpenRouterModelOptions): OpenRouterEmbeddingModel;
  /**
- * Creates a new OpenRouter provider instance.
+ * Create an image model.
  */
- constructor(options?: OpenRouterProviderSettings);
- private get baseConfig();
- chat(modelId: OpenRouterChatModelId, settings?: OpenRouterChatSettings): OpenRouterChatLanguageModel;
- completion(modelId: OpenRouterCompletionModelId, settings?: OpenRouterCompletionSettings): OpenRouterCompletionLanguageModel;
- textEmbeddingModel(modelId: OpenRouterEmbeddingModelId, settings?: OpenRouterEmbeddingSettings): OpenRouterEmbeddingModel;
+ imageModel(modelId: string, settings?: OpenRouterModelOptions): OpenRouterImageModel;
  /**
- * @deprecated Use textEmbeddingModel instead
+ * Create an image model (alias for imageModel).
  */
- embedding(modelId: OpenRouterEmbeddingModelId, settings?: OpenRouterEmbeddingSettings): OpenRouterEmbeddingModel;
+ image(modelId: string, settings?: OpenRouterModelOptions): OpenRouterImageModel;
+ /**
+ * Create an embedding model (alias for embeddingModel).
+ * @deprecated Use embeddingModel instead.
+ */
+ embedding(modelId: string, settings?: OpenRouterModelOptions): OpenRouterEmbeddingModel;
+ }
+ /**
+ * Internal settings passed to model constructors.
+ * Contains resolved API key and normalized configuration.
+ */
+ interface OpenRouterModelSettings {
+ apiKey: string;
+ baseURL: string;
+ headers?: Record<string, string>;
+ fetch?: typeof globalThis.fetch;
+ extraBody?: Record<string, unknown>;
+ modelOptions?: OpenRouterModelOptions;
  }
-
  /**
- * TOON (Token-Oriented Object Notation) helper utilities for token-efficient
- * data encoding in LLM prompts.
+ * Creates an OpenRouter provider instance for the AI SDK.
  *
- * TOON achieves ~40% token reduction vs JSON for tabular data while maintaining
- * high LLM comprehension accuracy.
+ * @description
+ * Factory function that creates an OpenRouter provider compatible with the AI SDK v3 provider
+ * specification. The provider can create language models, embedding models, and image models
+ * that route requests through OpenRouter to various AI providers (OpenAI, Anthropic, Google, etc.).
  *
- * @see https://toonformat.dev
- * @see https://github.com/toon-format/toon
+ * The returned provider is callable - you can use it directly as a function to create language
+ * models, or use its methods for specific model types.
  *
- * @example
+ * @param options - Provider settings including API key, base URL, headers, and fetch implementation.
+ * If no API key is provided, it will be loaded from the OPENROUTER_API_KEY environment variable.
+ * @returns An OpenRouter provider that can create language, embedding, and image models.
+ *
+ * @example Basic usage with environment variable
  * ```ts
- * import { encodeToon, decodeToon } from '@openrouter/ai-sdk-provider';
+ * import { createOpenRouter } from '@openrouter/ai-sdk-provider';
  *
- * // Encode data to TOON format
- * const toon = await encodeToon([
- * { id: 1, name: 'Alice', score: 95 },
- * { id: 2, name: 'Bob', score: 87 },
- * ]);
- * // Result: [2]{id,name,score}: 1,Alice,95 2,Bob,87
+ * // Uses OPENROUTER_API_KEY from environment
+ * const openrouter = createOpenRouter();
 *
- * // Decode TOON back to JSON
- * const data = await decodeToon(toon);
+ * const model = openrouter('anthropic/claude-3.5-sonnet');
  * ```
- */
-
- type ToonEncodeOptions = EncodeOptions;
- type ToonDecodeOptions = DecodeOptions;
- /**
- * Encodes a JavaScript value into TOON format string.
  *
- * TOON is particularly efficient for uniform arrays of objects (tabular data),
- * achieving CSV-like compactness while preserving explicit structure.
+ * @example With explicit API key
+ * ```ts
+ * import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 *
- * @param value - Any JavaScript value (objects, arrays, primitives)
- * @param options - Optional encoding configuration
- * @returns Promise resolving to TOON formatted string
+ * const openrouter = createOpenRouter({
+ * apiKey: process.env.OPENROUTER_API_KEY,
+ * });
 *
- * @example
+ * const model = openrouter('anthropic/claude-3.5-sonnet');
+ * ```
+ *
+ * @example Creating different model types
+ * ```ts
+ * const openrouter = createOpenRouter();
+ *
+ * // Language model (callable shorthand)
+ * const chat = openrouter('anthropic/claude-3.5-sonnet');
+ *
+ * // Embedding model
+ * const embeddings = openrouter.embeddingModel('openai/text-embedding-3-small');
+ *
+ * // Image model
+ * const image = openrouter.imageModel('openai/dall-e-3');
+ * ```
+ *
+ * @example Model variants
  * ```ts
- * // Simple object
- * await encodeToon({ name: 'Alice', age: 30 });
- * // name: Alice
- * // age: 30
- *
- * // Tabular array (most efficient)
- * await encodeToon([
- * { id: 1, name: 'Alice' },
- * { id: 2, name: 'Bob' },
- * ]);
- * // [2]{id,name}: 1,Alice 2,Bob
- *
- * // With options
- * await encodeToon(data, { indent: 4, keyFolding: 'safe' });
+ * const openrouter = createOpenRouter();
+ *
+ * // Online search variant - model has web search capabilities
+ * const online = openrouter('anthropic/claude-3.5-sonnet:online');
+ *
+ * // Nitro variant - faster inference
+ * const nitro = openrouter('anthropic/claude-3.5-sonnet:nitro');
+ *
+ * // Floor pricing variant - routes to cheapest provider
+ * const floor = openrouter('anthropic/claude-3.5-sonnet:floor');
+ *
+ * // Free tier variant
+ * const free = openrouter('meta-llama/llama-3-8b-instruct:free');
  * ```
  */
- declare function encodeToon(value: unknown, options?: ToonEncodeOptions): Promise<string>;
+ declare function createOpenRouter(options?: OpenRouterProviderSettings): OpenRouterProvider;
+
+ declare const VERSION = "6.0.0-alpha.0";
+
  /**
- * Decodes a TOON format string into a JavaScript value.
+ * Default OpenRouter provider instance.
  *
- * @param input - TOON formatted string
- * @param options - Optional decoding configuration
- * @returns Promise resolving to parsed JavaScript value
+ * Uses OPENROUTER_API_KEY environment variable for authentication.
  *
  * @example
  * ```ts
- * // Decode simple object
- * await decodeToon('name: Alice\nage: 30');
- * // { name: 'Alice', age: 30 }
- *
- * // Decode tabular array
- * await decodeToon('[2]{id,name}: 1,Alice 2,Bob');
- * // [{ id: 1, name: 'Alice' }, { id: 2, name: 'Bob' }]
+ * import { openrouter } from '@openrouter/ai-sdk-provider';
 *
- * // With options
- * await decodeToon(toonString, { strict: false, expandPaths: 'safe' });
+ * const model = openrouter('anthropic/claude-3.5-sonnet');
  * ```
  */
- declare function decodeToon(input: string, options?: ToonDecodeOptions): Promise<JsonValue>;
+ declare const openrouter: OpenRouterProvider;

- export { OpenRouter, type OpenRouterChatSettings, type OpenRouterCompletionSettings, type OpenRouterEmbeddingModelId, type OpenRouterEmbeddingSettings, type OpenRouterProvider, type OpenRouterProviderOptions, type OpenRouterProviderSettings, type OpenRouterSharedSettings, type OpenRouterUsageAccounting, type ToonDecodeOptions, type ToonEncodeOptions, createOpenRouter, decodeToon, encodeToon, openrouter };
+ export { type OpenRouterModelOptions, type OpenRouterPluginConfig, type OpenRouterProvider, type OpenRouterProviderRoutingConfig, type OpenRouterProviderSettings, VERSION, createOpenRouter, openrouter };
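For readers sizing up the jump from 1.5.3, the sketch below pieces together the new public surface strictly from the declarations in this diff: a callable provider returned by `createOpenRouter()`, `chat`/`languageModel`, `embeddingModel`, and `imageModel` factories, and per-model `OpenRouterModelOptions`. It is an illustrative assumption rather than documentation from the package itself: the `generateText` call presumes an AI SDK release that accepts `LanguageModelV3` models, and the model IDs are simply the ones used in the new JSDoc examples.

```ts
// Hypothetical usage sketch assembled from the 6.0.0-alpha.0 declarations above.
// Assumes an AI SDK version whose generateText() accepts LanguageModelV3 models.
import { generateText } from 'ai';
import { createOpenRouter, VERSION } from '@openrouter/ai-sdk-provider';

// Falls back to the OPENROUTER_API_KEY environment variable when apiKey is omitted.
const openrouter = createOpenRouter({
  headers: { 'X-Title': 'My App', 'HTTP-Referer': 'https://myapp.com' },
});

// The provider is callable; this is equivalent to openrouter.chat(...) or openrouter.languageModel(...).
const model = openrouter('anthropic/claude-3.5-sonnet', {
  usage: { include: true },                                  // usage accounting
  models: ['anthropic/claude-3-opus', 'openai/gpt-4'],       // fallback model IDs
  provider: { order: ['Anthropic'], allowFallbacks: false }, // routing config now uses camelCase keys
  systemMessageMode: 'system',
});

const { text } = await generateText({
  model,
  prompt: `Say hello from @openrouter/ai-sdk-provider ${VERSION}.`,
});
console.log(text);

// Other model types declared in the new file:
const embeddings = openrouter.embeddingModel('openai/text-embedding-3-small');
const images = openrouter.imageModel('openai/dall-e-3'); // doGenerate currently throws (Tier 3 functionality)
```

Code built on the 1.5.3 surface that used `completion()`, the `OpenRouter` class, snake_case provider routing keys such as `allow_fallbacks`, or the TOON helpers (`encodeToon`/`decodeToon`) has no corresponding export in this alpha and will need to be migrated before upgrading.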