@openrouter/ai-sdk-provider 0.7.1 → 1.0.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,6 @@
-import { LanguageModelV1 } from '@ai-sdk/provider';
-export { LanguageModelV1 } from '@ai-sdk/provider';
+import { LanguageModelV2, LanguageModelV2CallOptions, LanguageModelV2Content, LanguageModelV2FinishReason, LanguageModelV2Usage, LanguageModelV2CallWarning, LanguageModelV2ResponseMetadata, SharedV2Headers, LanguageModelV2StreamPart } from '@ai-sdk/provider';
+export { LanguageModelV2, LanguageModelV2Prompt } from '@ai-sdk/provider';
 
-type OpenRouterLanguageModel = LanguageModelV1;
 type OpenRouterProviderOptions = {
     models?: string[];
     /**
@@ -10,6 +9,7 @@ type OpenRouterProviderOptions = {
      * If `exclude` is true, reasoning will be removed from the response. Default is false.
      */
     reasoning?: {
+        enabled?: boolean;
         exclude?: boolean;
     } & ({
         max_tokens: number;
@@ -102,17 +102,18 @@ type OpenRouterCompletionConfig = {
     fetch?: typeof fetch;
     extraBody?: Record<string, unknown>;
 };
-declare class OpenRouterCompletionLanguageModel implements LanguageModelV1 {
-    readonly specificationVersion = "v1";
-    readonly defaultObjectGenerationMode: undefined;
+declare class OpenRouterCompletionLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion: "v2";
+    readonly provider = "openrouter";
     readonly modelId: OpenRouterCompletionModelId;
+    readonly supportedUrls: Record<string, RegExp[]>;
+    readonly defaultObjectGenerationMode: undefined;
     readonly settings: OpenRouterCompletionSettings;
     private readonly config;
     constructor(modelId: OpenRouterCompletionModelId, settings: OpenRouterCompletionSettings, config: OpenRouterCompletionConfig);
-    get provider(): string;
     private getArgs;
-    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>>;
-    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>>;
+    doGenerate(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doGenerate']>>>;
+    doStream(options: LanguageModelV2CallOptions): Promise<Awaited<ReturnType<LanguageModelV2['doStream']>>>;
 }
 
 type OpenRouterChatModelId = string;
@@ -132,7 +133,7 @@ type OpenRouterChatSettings = {
     token from being generated.
      */
     logitBias?: Record<number, number>;
-    /**
+    /**s
     Return the log probabilities of the tokens. Including logprobs will increase
     the response size and can slow down response times. However, it can
     be useful to better understand how the model is behaving.
@@ -166,19 +167,45 @@ type OpenRouterChatConfig = {
     fetch?: typeof fetch;
     extraBody?: Record<string, unknown>;
 };
-type DoGenerateOutput = Awaited<ReturnType<LanguageModelV1['doGenerate']>>;
-type DoStreamOutput = Awaited<ReturnType<LanguageModelV1['doStream']>>;
-declare class OpenRouterChatLanguageModel implements LanguageModelV1 {
-    readonly specificationVersion = "v1";
-    readonly defaultObjectGenerationMode = "tool";
+declare class OpenRouterChatLanguageModel implements LanguageModelV2 {
+    readonly specificationVersion: "v2";
+    readonly provider = "openrouter";
+    readonly defaultObjectGenerationMode: "tool";
     readonly modelId: OpenRouterChatModelId;
+    readonly supportedUrls: Record<string, RegExp[]>;
     readonly settings: OpenRouterChatSettings;
     private readonly config;
     constructor(modelId: OpenRouterChatModelId, settings: OpenRouterChatSettings, config: OpenRouterChatConfig);
-    get provider(): string;
     private getArgs;
-    doGenerate(options: Parameters<LanguageModelV1['doGenerate']>[0]): Promise<DoGenerateOutput>;
-    doStream(options: Parameters<LanguageModelV1['doStream']>[0]): Promise<DoStreamOutput>;
+    doGenerate(options: LanguageModelV2CallOptions): Promise<{
+        content: Array<LanguageModelV2Content>;
+        finishReason: LanguageModelV2FinishReason;
+        usage: LanguageModelV2Usage;
+        warnings: Array<LanguageModelV2CallWarning>;
+        providerMetadata?: {
+            openrouter: {
+                usage: OpenRouterUsageAccounting;
+            };
+        };
+        request?: {
+            body?: unknown;
+        };
+        response?: LanguageModelV2ResponseMetadata & {
+            headers?: SharedV2Headers;
+            body?: unknown;
+        };
+    }>;
+    doStream(options: LanguageModelV2CallOptions): Promise<{
+        stream: ReadableStream<LanguageModelV2StreamPart>;
+        warnings: Array<LanguageModelV2CallWarning>;
+        request?: {
+            body?: unknown;
+        };
+        response?: LanguageModelV2ResponseMetadata & {
+            headers?: SharedV2Headers;
+            body?: unknown;
+        };
+    }>;
 }
 
-export { OpenRouterChatLanguageModel, type OpenRouterChatModelId, type OpenRouterChatSettings, OpenRouterCompletionLanguageModel, type OpenRouterCompletionModelId, type OpenRouterCompletionSettings, type OpenRouterLanguageModel, type OpenRouterProviderOptions, type OpenRouterSharedSettings, type OpenRouterUsageAccounting };
+export { OpenRouterChatLanguageModel, type OpenRouterChatModelId, type OpenRouterChatSettings, OpenRouterCompletionLanguageModel, type OpenRouterCompletionModelId, type OpenRouterCompletionSettings, type OpenRouterProviderOptions, type OpenRouterSharedSettings, type OpenRouterUsageAccounting };
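
For callers of the provider, the visible change to `OpenRouterProviderOptions` is the new `reasoning.enabled` flag added next to the existing `exclude` flag. The sketch below is a minimal options object using only fields visible in this diff; the model slug is a placeholder, `max_tokens` satisfies the `{ max_tokens: number; ... }` branch of the intersection, and how this object is wired into the provider factory is not shown here.

import type { OpenRouterProviderOptions } from '@openrouter/ai-sdk-provider';

// Sketch of the 1.0.0-beta.0 options shape, assuming only the fields shown
// in the diff above. The model slug is a placeholder, not a recommendation.
const providerOptions: OpenRouterProviderOptions = {
  models: ['some-vendor/some-model'],
  reasoning: {
    enabled: true,    // new in 1.0.0-beta.0
    exclude: false,   // keep reasoning output in the response (default)
    max_tokens: 1024, // required by the max_tokens branch of the intersection
  },
};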
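
On the consumer side, `doGenerate` no longer mirrors the V1 `Parameters<...>`/`ReturnType<...>` plumbing: it resolves to a `content` array plus typed `finishReason`, `usage`, `warnings`, and optional `providerMetadata.openrouter.usage`. A hedged sketch, assuming only the result type shown above plus the standard `type: 'text'` content part from `@ai-sdk/provider` v2; `generateAndReportUsage` is a hypothetical helper, and constructing `model` and `options` is assumed to happen elsewhere.

import type { LanguageModelV2CallOptions } from '@ai-sdk/provider';
import type {
  OpenRouterChatLanguageModel,
  OpenRouterUsageAccounting,
} from '@openrouter/ai-sdk-provider';

// Calls doGenerate on an already-constructed model and returns the OpenRouter
// usage accounting that 1.0.0-beta.0 exposes under providerMetadata.openrouter.
async function generateAndReportUsage(
  model: OpenRouterChatLanguageModel,
  options: LanguageModelV2CallOptions,
): Promise<OpenRouterUsageAccounting | undefined> {
  const result = await model.doGenerate(options);

  // In V2, generated text lives in the content array rather than a top-level field.
  for (const part of result.content) {
    if (part.type === 'text') {
      console.log(part.text);
    }
  }

  console.log('finish reason:', result.finishReason);
  return result.providerMetadata?.openrouter.usage;
}

`doStream` follows the same pattern: it resolves to a `ReadableStream<LanguageModelV2StreamPart>` plus the same optional `request`/`response` metadata, so streaming callers read warnings from the resolved object and the parts from the stream.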