@juspay/neurolink 8.23.2 → 8.24.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,507 @@
1
+ import { createOpenRouter } from "@openrouter/ai-sdk-provider";
2
+ import { streamText, Output } from "ai";
3
+ import { AIProviderName } from "../constants/enums.js";
4
+ import { BaseProvider } from "../core/baseProvider.js";
5
+ import { logger } from "../utils/logger.js";
6
+ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
7
+ import { getProviderModel } from "../utils/providerConfig.js";
8
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
9
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
10
+ import { createProxyFetch } from "../proxy/proxyFetch.js";
11
+ // Constants
12
+ const MODELS_DISCOVERY_TIMEOUT_MS = 5000; // 5 seconds for model discovery
13
+ // Configuration helpers
14
+ const getOpenRouterConfig = () => {
15
+ const apiKey = process.env.OPENROUTER_API_KEY;
16
+ if (!apiKey) {
17
+ throw new Error("OPENROUTER_API_KEY environment variable is required. " +
18
+ "Get your API key at https://openrouter.ai/keys");
19
+ }
20
+ return {
21
+ apiKey,
22
+ referer: process.env.OPENROUTER_REFERER,
23
+ appName: process.env.OPENROUTER_APP_NAME,
24
+ };
25
+ };
26
/**
 * Resolves the default OpenRouter model name.
 *
 * OpenRouter model identifiers use the 'provider/model' format, e.g.:
 *   - 'anthropic/claude-3-5-sonnet'
 *   - 'openai/gpt-4o'
 *   - 'google/gemini-2.0-flash'
 *   - 'meta-llama/llama-3-70b-instruct'
 *
 * The OPENROUTER_MODEL environment variable (read via getProviderModel)
 * overrides the built-in default of 'anthropic/claude-3-5-sonnet'.
 */
const getDefaultOpenRouterModel = () =>
    getProviderModel("OPENROUTER_MODEL", "anthropic/claude-3-5-sonnet");
41
/**
 * OpenRouter Provider - BaseProvider Implementation
 * Provides access to 300+ models from 60+ providers via OpenRouter unified gateway
 */
export class OpenRouterProvider extends BaseProvider {
    // Vercel AI SDK language-model instance for the configured model.
    model;
    // Client factory returned by createOpenRouter; calling it with a model id yields a model instance.
    openRouterClient;
    // Cache for available models to avoid repeated API calls.
    // NOTE(review): these are class-level (static), so the cache is shared by all
    // provider instances in the process.
    static modelsCache = [];
    static modelsCacheTime = 0;
    static MODELS_CACHE_DURATION = 10 * 60 * 1000; // 10 minutes
    // Cache for model capabilities (which models support tools).
    // Populated by cacheModelCapabilities(); until then supportsTools() falls
    // back to hard-coded name patterns.
    static toolCapableModels = new Set();
    static capabilitiesCached = false;
    /**
     * Builds the OpenRouter client and model instance.
     * Throws if OPENROUTER_API_KEY is not set (see getOpenRouterConfig).
     *
     * @param modelName Optional 'provider/model' id; falls back to getDefaultOpenRouterModel().
     * @param sdk Optional SDK handle forwarded to BaseProvider — semantics defined by the base class.
     */
    constructor(modelName, sdk) {
        super(modelName, AIProviderName.OPENROUTER, sdk);
        // Initialize OpenRouter using the official SDK
        const config = getOpenRouterConfig();
        // Build headers for attribution on openrouter.ai/activity dashboard
        const headers = {};
        if (config.referer) {
            headers["HTTP-Referer"] = config.referer;
        }
        if (config.appName) {
            headers["X-Title"] = config.appName;
        }
        // Create OpenRouter client with optional attribution headers.
        // The conditional spread omits the `headers` key entirely when no
        // attribution env vars are set.
        this.openRouterClient = createOpenRouter({
            apiKey: config.apiKey,
            ...(Object.keys(headers).length > 0 && { headers }),
        });
        // Initialize model with OpenRouter client
        this.model = this.openRouterClient(this.modelName || getDefaultOpenRouterModel());
        logger.debug("OpenRouter Provider initialized", {
            modelName: this.modelName,
            provider: this.providerName,
        });
    }
    // Identifies this provider to the BaseProvider framework.
    getProviderName() {
        return AIProviderName.OPENROUTER;
    }
    // Model id used when the caller did not specify one.
    getDefaultModel() {
        return getDefaultOpenRouterModel();
    }
    /**
     * Returns the Vercel AI SDK model instance for OpenRouter
     */
    getAISDKModel() {
        return this.model;
    }
    /**
     * Maps raw errors to user-facing Error objects with actionable messages.
     * Matching is done on error name/message substrings, checked in priority
     * order: timeout, connectivity, auth, rate limit, unknown model, credits,
     * tool-calling support. Always returns (never throws) a new Error.
     */
    handleProviderError(error) {
        if (error instanceof TimeoutError) {
            return new Error(`OpenRouter request timed out: ${error.message}`);
        }
        // Check for timeout by error name and message as fallback
        // (covers timeout errors constructed outside our TimeoutError class).
        const errorRecord = error;
        if (errorRecord?.name === "TimeoutError" ||
            (typeof errorRecord?.message === "string" &&
                errorRecord.message.includes("Timeout"))) {
            return new Error(`OpenRouter request timed out: ${errorRecord?.message || "Unknown timeout"}`);
        }
        if (typeof errorRecord?.message === "string") {
            if (errorRecord.message.includes("ECONNREFUSED") ||
                errorRecord.message.includes("Failed to fetch")) {
                return new Error("OpenRouter API not available. Please check your network connection and try again.");
            }
            if (errorRecord.message.includes("API_KEY_INVALID") ||
                errorRecord.message.includes("Invalid API key") ||
                errorRecord.message.includes("invalid_api_key") ||
                errorRecord.message.includes("Unauthorized")) {
                return new Error("Invalid OpenRouter API key. Please check your OPENROUTER_API_KEY environment variable. " +
                    "Get your key at https://openrouter.ai/keys");
            }
            if (errorRecord.message.includes("rate limit")) {
                return new Error("OpenRouter rate limit exceeded. Please try again later or upgrade your account at https://openrouter.ai/credits");
            }
            if (errorRecord.message.includes("model") &&
                errorRecord.message.includes("not found")) {
                return new Error(`Model '${this.modelName}' not available on OpenRouter. ` +
                    "Browse available models at https://openrouter.ai/models");
            }
            if (errorRecord.message.includes("insufficient_credits")) {
                return new Error("Insufficient OpenRouter credits. Add credits at https://openrouter.ai/credits");
            }
            // Tool/function calling errors
            if (errorRecord.message.includes("tool use") ||
                errorRecord.message.includes("tool_use") ||
                errorRecord.message.includes("function_call") ||
                errorRecord.message.includes("tools are not supported") ||
                errorRecord.message.includes("No endpoints found")) {
                return new Error(`Model '${this.modelName}' does not support tool calling. ` +
                    "Use a tool-capable model like:\n" +
                    " • google/gemini-2.0-flash-exp:free (free)\n" +
                    " • meta-llama/llama-3.3-70b-instruct:free (free)\n" +
                    " • anthropic/claude-3-5-sonnet (paid)\n" +
                    " • openai/gpt-4o (paid)\n" +
                    "Or use --disableTools flag. " +
                    "See all tool-capable models at https://openrouter.ai/models?supported_parameters=tools");
            }
        }
        return new Error(`OpenRouter error: ${errorRecord?.message || "Unknown error"}`);
    }
    /**
     * OpenRouter supports tools for compatible models
     * Checks cached model capabilities or uses known patterns as fallback
     *
     * Resolution order:
     *   1. Exact lookup in the capability cache (populated by cacheModelCapabilities).
     *   2. Case-insensitive substring match against a conservative pattern list.
     *   3. Default to false with a warning (safe default for unknown models).
     */
    supportsTools() {
        const modelName = this.modelName || getDefaultOpenRouterModel();
        // If we have cached capabilities, use them
        if (OpenRouterProvider.capabilitiesCached) {
            const supported = OpenRouterProvider.toolCapableModels.has(modelName);
            logger.debug("OpenRouter: Tool support check (cached)", {
                model: modelName,
                supportsTools: supported,
            });
            return supported;
        }
        // Fallback: Known tool-capable model patterns (conservative list)
        const knownToolCapablePatterns = [
            "anthropic/claude",
            "openai/gpt-4",
            "openai/gpt-3.5",
            "openai/o1",
            "openai/o3",
            "openai/o4",
            "google/gemini",
            "google/gemma-3",
            "mistralai/mistral-large",
            "mistralai/mistral-small",
            "mistralai/devstral",
            "meta-llama/llama-3.3",
            "meta-llama/llama-3.2",
            "qwen/qwen3",
            "nvidia/nemotron",
        ];
        const isKnownCapable = knownToolCapablePatterns.some((pattern) => modelName.toLowerCase().includes(pattern.toLowerCase()));
        if (isKnownCapable) {
            logger.debug("OpenRouter: Tool support enabled (pattern match)", {
                model: modelName,
            });
            return true;
        }
        // For unknown models, warn and disable tools (safe default)
        logger.warn("OpenRouter: Unknown model tool capability, disabling tools", {
            model: modelName,
            suggestion: "Use a known tool-capable model like anthropic/claude-3-5-sonnet, openai/gpt-4o, or google/gemini-2.0-flash-exp:free",
        });
        return false;
    }
    /**
     * Provider-specific streaming implementation
     * Note: This is only used when tools are disabled
     *
     * Returns an object with a transformed async-iterable `stream` of
     * `{ content }` chunks, plus provider/model metadata and a lazily
     * resolving analytics promise.
     *
     * @param options Stream options (validated by validateStreamOptions).
     * @param analysisSchema Optional schema for structured output; applied
     *        via the AI SDK's experimental Output.object wrapper.
     */
    async executeStream(options, analysisSchema) {
        this.validateStreamOptions(options);
        const startTime = Date.now();
        let chunkCount = 0; // Track chunk count for debugging
        const timeout = this.getTimeout(options);
        const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
        try {
            // Build message array from options with multimodal support
            // Using protected helper from BaseProvider to eliminate code duplication
            const messages = await this.buildMessagesForStream(options);
            const model = await this.getAISDKModelWithMiddleware(options);
            // Get all available tools (direct + MCP + external) for streaming
            const shouldUseTools = !options.disableTools && this.supportsTools();
            const tools = shouldUseTools ? await this.getAllTools() : {};
            logger.debug(`OpenRouter: Tools for streaming`, {
                shouldUseTools,
                toolCount: Object.keys(tools).length,
                toolNames: Object.keys(tools),
            });
            // Build complete stream options with proper typing
            // Note: maxRetries set to 0 for OpenRouter free tier to prevent SDK's quick retries
            // from consuming rate limits. Our test suite handles retries with appropriate delays.
            let streamOptions = {
                model: model,
                messages: messages,
                temperature: options.temperature,
                maxRetries: 0, // Disable SDK retries - let caller handle rate limit retries with delays
                ...(options.maxTokens && { maxTokens: options.maxTokens }),
                // Tool options are only attached when tools are enabled AND at
                // least one tool is actually available.
                ...(shouldUseTools &&
                    Object.keys(tools).length > 0 && {
                    tools,
                    toolChoice: "auto",
                    maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
                }),
                abortSignal: timeoutController?.controller.signal,
                onError: (event) => {
                    const error = event.error;
                    const errorMessage = error instanceof Error ? error.message : String(error);
                    logger.error(`OpenRouter: Stream error`, {
                        provider: this.providerName,
                        modelName: this.modelName,
                        error: errorMessage,
                        chunkCount,
                    });
                },
                onFinish: (event) => {
                    logger.debug(`OpenRouter: Stream finished`, {
                        finishReason: event.finishReason,
                        totalChunks: chunkCount,
                    });
                },
                onChunk: () => {
                    chunkCount++;
                },
                onStepFinish: ({ toolCalls, toolResults }) => {
                    logger.info("Tool execution completed", { toolResults, toolCalls });
                    // Fire-and-forget persistence: storage failures are logged,
                    // never allowed to break the stream.
                    this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
                        logger.warn("OpenRouterProvider: Failed to store tool executions", {
                            provider: this.providerName,
                            error: error instanceof Error ? error.message : String(error),
                        });
                    });
                },
            };
            // Add analysisSchema support if provided
            if (analysisSchema) {
                try {
                    streamOptions = {
                        ...streamOptions,
                        experimental_output: Output.object({
                            schema: analysisSchema,
                        }),
                    };
                }
                catch (error) {
                    // Best-effort: a bad schema degrades to plain-text streaming.
                    logger.warn("Schema application failed, continuing without schema", {
                        error: String(error),
                    });
                }
            }
            const result = await streamText(streamOptions);
            timeoutController?.cleanup();
            // Transform stream to content object stream using fullStream (handles both text and tool calls)
            const transformedStream = (async function* () {
                // Try fullStream first (handles both text and tool calls), fallback to textStream
                const streamToUse = result.fullStream || result.textStream;
                for await (const chunk of streamToUse) {
                    // Handle different chunk types from fullStream
                    if (chunk && typeof chunk === "object") {
                        // Check for error chunks first (critical error handling)
                        if ("type" in chunk && chunk.type === "error") {
                            const errorChunk = chunk;
                            logger.error(`OpenRouter: Error chunk received:`, {
                                errorType: errorChunk.type,
                                errorDetails: errorChunk.error,
                            });
                            throw new Error(`OpenRouter streaming error: ${errorChunk.error?.message ||
                                "Unknown error"}`);
                        }
                        if ("textDelta" in chunk) {
                            // Text delta from fullStream
                            const textDelta = chunk.textDelta;
                            if (textDelta) {
                                yield { content: textDelta };
                            }
                        }
                        else if (chunk.type === "tool-call-streaming-start") {
                            // Tool call streaming start event - log for debugging
                            // (not yielded: consumers only receive text content chunks).
                            const toolCall = chunk;
                            logger.debug("OpenRouter: Tool call streaming start", {
                                toolCallId: toolCall.toolCallId,
                                toolName: toolCall.toolName,
                            });
                        }
                    }
                    else if (typeof chunk === "string") {
                        // Direct string chunk from textStream fallback
                        yield { content: chunk };
                    }
                }
            })();
            // Create analytics promise that resolves after stream completion
            const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
                requestId: `openrouter-stream-${Date.now()}`,
                streamingMode: true,
            });
            return {
                stream: transformedStream,
                provider: this.providerName,
                model: this.modelName,
                analytics: analyticsPromise,
                metadata: {
                    startTime,
                    streamId: `openrouter-${Date.now()}`,
                },
            };
        }
        catch (error) {
            timeoutController?.cleanup();
            throw this.handleProviderError(error);
        }
    }
    /**
     * Get available models from OpenRouter API
     * Dynamically fetches from /api/v1/models endpoint with caching and fallback
     *
     * @returns Array of model ids. Sources, in order: valid static cache,
     *          live API fetch (then cached), hard-coded fallback list.
     */
    async getAvailableModels() {
        const functionTag = "OpenRouterProvider.getAvailableModels";
        const now = Date.now();
        // Check if cached models are still valid
        if (OpenRouterProvider.modelsCache.length > 0 &&
            now - OpenRouterProvider.modelsCacheTime <
                OpenRouterProvider.MODELS_CACHE_DURATION) {
            logger.debug(`[${functionTag}] Using cached models`, {
                cacheAge: Math.round((now - OpenRouterProvider.modelsCacheTime) / 1000),
                modelCount: OpenRouterProvider.modelsCache.length,
            });
            return OpenRouterProvider.modelsCache;
        }
        // Try to fetch models dynamically
        try {
            const dynamicModels = await this.fetchModelsFromAPI();
            if (dynamicModels.length > 0) {
                // Cache successful result
                OpenRouterProvider.modelsCache = dynamicModels;
                OpenRouterProvider.modelsCacheTime = now;
                logger.debug(`[${functionTag}] Successfully fetched models from API`, {
                    modelCount: dynamicModels.length,
                });
                return dynamicModels;
            }
        }
        catch (error) {
            logger.warn(`[${functionTag}] Failed to fetch models from API, using fallback`, {
                error: error instanceof Error ? error.message : String(error),
            });
        }
        // Fallback to hardcoded list if API fetch fails
        // (also reached when the API returns an empty list).
        const fallbackModels = [
            // Anthropic Claude models
            "anthropic/claude-3-5-sonnet",
            "anthropic/claude-3-5-haiku",
            "anthropic/claude-3-opus",
            // OpenAI models
            "openai/gpt-4o",
            "openai/gpt-4o-mini",
            "openai/gpt-4-turbo",
            // Google models
            "google/gemini-2.0-flash",
            "google/gemini-1.5-pro",
            // Meta Llama models
            "meta-llama/llama-3.1-70b-instruct",
            "meta-llama/llama-3.1-8b-instruct",
            // Mistral models
            "mistralai/mistral-large",
            "mistralai/mixtral-8x7b-instruct",
        ];
        logger.debug(`[${functionTag}] Using fallback model list`, {
            modelCount: fallbackModels.length,
        });
        return fallbackModels;
    }
    /**
     * Fetch available models from OpenRouter API /api/v1/models endpoint
     * @private
     *
     * Aborts via AbortController after MODELS_DISCOVERY_TIMEOUT_MS and
     * rethrows the abort as a descriptive timeout Error. Returns a sorted
     * array of non-empty string model ids.
     */
    async fetchModelsFromAPI() {
        const functionTag = "OpenRouterProvider.fetchModelsFromAPI";
        const config = getOpenRouterConfig();
        const modelsUrl = "https://openrouter.ai/api/v1/models";
        const controller = new AbortController();
        const timeoutId = setTimeout(() => controller.abort(), MODELS_DISCOVERY_TIMEOUT_MS);
        try {
            logger.debug(`[${functionTag}] Fetching models from ${modelsUrl}`);
            // Proxy-aware fetch so corporate proxies are honored.
            const proxyFetch = createProxyFetch();
            const response = await proxyFetch(modelsUrl, {
                method: "GET",
                headers: {
                    Authorization: `Bearer ${config.apiKey}`,
                    "Content-Type": "application/json",
                },
                signal: controller.signal,
            });
            clearTimeout(timeoutId);
            if (!response.ok) {
                throw new Error(`HTTP ${response.status}: ${response.statusText}`);
            }
            const data = await response.json();
            // Parse OpenRouter models response with type guard
            if (!this.isValidModelsResponse(data)) {
                throw new Error("Invalid response format: expected data.data array");
            }
            const models = data.data
                .map((model) => model.id)
                .filter((id) => typeof id === "string" && id.length > 0)
                .sort();
            logger.debug(`[${functionTag}] Successfully parsed models`, {
                totalModels: models.length,
                sampleModels: models.slice(0, 5),
            });
            return models;
        }
        catch (error) {
            clearTimeout(timeoutId);
            if (error instanceof Error && error.name === "AbortError") {
                throw new Error(`Request timed out after ${MODELS_DISCOVERY_TIMEOUT_MS / 1000} seconds`);
            }
            throw error;
        }
    }
    /**
     * Type guard to validate the models API response structure
     * @private
     *
     * Only checks that `data.data` is an array; individual entries are
     * validated by the callers' own filters.
     */
    isValidModelsResponse(data) {
        return (data !== null &&
            typeof data === "object" &&
            "data" in data &&
            Array.isArray(data.data));
    }
    /**
     * Fetch and cache model capabilities from OpenRouter API
     * Call this to enable accurate tool support detection
     *
     * Populates the static toolCapableModels set with every model whose
     * `supported_parameters` includes "tools". Idempotent: returns
     * immediately once capabilities are cached. Never throws — failures are
     * logged and supportsTools() keeps using its pattern fallback.
     */
    async cacheModelCapabilities() {
        const functionTag = "OpenRouterProvider.cacheModelCapabilities";
        if (OpenRouterProvider.capabilitiesCached) {
            return; // Already cached
        }
        try {
            const config = getOpenRouterConfig();
            const modelsUrl = "https://openrouter.ai/api/v1/models";
            const controller = new AbortController();
            const timeoutId = setTimeout(() => controller.abort(), MODELS_DISCOVERY_TIMEOUT_MS);
            const proxyFetch = createProxyFetch();
            const response = await proxyFetch(modelsUrl, {
                method: "GET",
                headers: {
                    Authorization: `Bearer ${config.apiKey}`,
                    "Content-Type": "application/json",
                },
                signal: controller.signal,
            });
            clearTimeout(timeoutId);
            if (!response.ok) {
                throw new Error(`HTTP ${response.status}: ${response.statusText}`);
            }
            const data = await response.json();
            if (!this.isValidModelsResponse(data)) {
                throw new Error("Invalid response format");
            }
            // Extract tool-capable models
            const toolCapable = new Set();
            for (const model of data.data) {
                if (model.id && model.supported_parameters?.includes("tools")) {
                    toolCapable.add(model.id);
                }
            }
            OpenRouterProvider.toolCapableModels = toolCapable;
            OpenRouterProvider.capabilitiesCached = true;
            logger.debug(`[${functionTag}] Cached model capabilities`, {
                totalModels: data.data.length,
                toolCapableCount: toolCapable.size,
            });
        }
        catch (error) {
            logger.warn(`[${functionTag}] Failed to cache capabilities, using fallback patterns`, {
                error: error instanceof Error ? error.message : String(error),
            });
            // Don't set capabilitiesCached - let it use fallback patterns
        }
    }
}
507
+ //# sourceMappingURL=openRouter.js.map
@@ -11,7 +11,7 @@ export * from "./taskClassificationTypes.js";
11
11
  export type { NeuroLinkConfig, PerformanceConfig, CacheConfig, FallbackConfig, RetryConfig, AnalyticsConfig, ToolConfig, BackupInfo, BackupMetadata, ConfigValidationResult, ConfigUpdateOptions, } from "./configTypes.js";
12
12
  export type { Unknown, UnknownRecord, UnknownArray, JsonValue, JsonObject, JsonArray, ErrorInfo, Result, FunctionParameters, } from "./common.js";
13
13
  export type { ToolArgs, ToolContext, ToolResult, ToolDefinition, SimpleTool, AvailableTool, ToolInfo, ToolExecution, ToolExecutionResult, ValidationResult, ExecutionContext, CacheOptions, FallbackOptions, } from "./tools.js";
14
- export type { AISDKModel, ProviderError, AIModelProviderConfig, } from "./providers.js";
14
+ export type { AISDKModel, ProviderError, AIModelProviderConfig, OpenRouterConfig, OpenRouterModelInfo, OpenRouterModelsResponse, OpenRouterProviderCache, } from "./providers.js";
15
15
  export type { BaseCommandArgs, GenerateCommandArgs, MCPCommandArgs, ModelsCommandArgs, CommandResult, GenerateResult, StreamChunk, } from "./cli.js";
16
16
  export type { TaskType, TaskClassification, ClassificationScores, ClassificationStats, ClassificationValidation, } from "./taskClassificationTypes.js";
17
17
  export type { MCPTransportType, MCPServerConnectionStatus, MCPServerCategory, MCPServerStatus, MCPDiscoveredServer, MCPConnectedServer, MCPToolInfo, MCPExecutableTool, MCPServerMetadata, MCPToolMetadata, MCPServerRegistryEntry, NeuroLinkMCPTool, NeuroLinkMCPServer, NeuroLinkExecutionContext, MCPServerConfig, DiscoveredMcp, McpMetadata, ToolDiscoveryResult, ExternalToolExecutionOptions, ToolValidationResult, ToolRegistryEvents, CircuitBreakerState, CircuitBreakerConfig, CircuitBreakerStats, CircuitBreakerEvents, McpRegistry, MCPClientResult, FlexibleValidationResult, } from "./mcpTypes.js";
@@ -1063,3 +1063,49 @@ export declare namespace TelemetryTypes {
1063
1063
  recordException(exception: unknown): void;
1064
1064
  }
1065
1065
  }
1066
/**
 * OpenRouter provider configuration
 */
export type OpenRouterConfig = {
    /** OpenRouter API key */
    apiKey: string;
    /** HTTP Referer header for attribution on openrouter.ai/activity */
    referer?: string;
    /** App name for X-Title header attribution */
    appName?: string;
};
/**
 * OpenRouter model information from /api/v1/models endpoint
 */
export type OpenRouterModelInfo = {
    /** Model ID in format 'provider/model-name' */
    id: string;
    /** Supported parameters (e.g., 'tools', 'temperature') */
    supported_parameters?: string[];
    /** Model name */
    name?: string;
    /** Model description */
    description?: string;
    /** Pricing information (string-encoded decimal values as returned by the API) */
    pricing?: {
        /** Cost per prompt token */
        prompt?: string;
        /** Cost per completion token */
        completion?: string;
    };
    /** Context length */
    context_length?: number;
};
/**
 * OpenRouter models API response
 */
export type OpenRouterModelsResponse = {
    /** List of models available to the authenticated account */
    data: OpenRouterModelInfo[];
};
/**
 * OpenRouter provider static cache properties (for testing/internal use)
 */
export type OpenRouterProviderCache = {
    /** Cached model ids from the last successful /api/v1/models fetch */
    modelsCache: string[];
    /** Epoch-ms timestamp of when modelsCache was populated */
    modelsCacheTime: number;
    /** Model ids whose supported_parameters include 'tools' */
    toolCapableModels: Set<string>;
    /** True once toolCapableModels has been populated from the API */
    capabilitiesCached: boolean;
};
@@ -107,6 +107,20 @@ const PDF_PROVIDER_CONFIGS = {
107
107
  requiresCitations: false,
108
108
  apiType: "files-api",
109
109
  },
110
+ openrouter: {
111
+ maxSizeMB: 10,
112
+ maxPages: 100,
113
+ supportsNative: true,
114
+ requiresCitations: false,
115
+ apiType: "files-api",
116
+ },
117
+ or: {
118
+ maxSizeMB: 10,
119
+ maxPages: 100,
120
+ supportsNative: true,
121
+ requiresCitations: false,
122
+ apiType: "files-api",
123
+ },
110
124
  };
111
125
  export class PDFProcessor {
112
126
  // PDF magic bytes: %PDF-
@@ -0,0 +1,56 @@
1
+ import type { ZodType, ZodTypeDef } from "zod";
2
+ import { type Schema, type LanguageModelV1 } from "ai";
3
+ import { AIProviderName } from "../constants/enums.js";
4
+ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
5
+ import { BaseProvider } from "../core/baseProvider.js";
6
/**
 * OpenRouter Provider - BaseProvider Implementation
 * Provides access to 300+ models from 60+ providers via OpenRouter unified gateway
 */
export declare class OpenRouterProvider extends BaseProvider {
    /** Vercel AI SDK model instance for the configured model */
    private model;
    /** Client factory created by the OpenRouter SDK */
    private openRouterClient;
    /** Shared (static) cache of model ids fetched from the API */
    private static modelsCache;
    /** Epoch-ms timestamp of the last successful models fetch */
    private static modelsCacheTime;
    /** How long (ms) the models cache stays valid */
    private static readonly MODELS_CACHE_DURATION;
    /** Model ids known to support tool calling */
    private static toolCapableModels;
    /** Whether toolCapableModels has been populated from the API */
    private static capabilitiesCached;
    /**
     * @param modelName Optional 'provider/model' id; defaults via OPENROUTER_MODEL env var.
     * @param sdk Optional SDK handle forwarded to BaseProvider.
     */
    constructor(modelName?: string, sdk?: unknown);
    protected getProviderName(): AIProviderName;
    protected getDefaultModel(): string;
    /**
     * Returns the Vercel AI SDK model instance for OpenRouter
     */
    protected getAISDKModel(): LanguageModelV1;
    /** Maps raw errors to user-facing Error objects with actionable messages */
    handleProviderError(error: unknown): Error;
    /**
     * OpenRouter supports tools for compatible models
     * Checks cached model capabilities or uses known patterns as fallback
     */
    supportsTools(): boolean;
    /**
     * Provider-specific streaming implementation
     * Note: This is only used when tools are disabled
     */
    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
    /**
     * Get available models from OpenRouter API
     * Dynamically fetches from /api/v1/models endpoint with caching and fallback
     */
    getAvailableModels(): Promise<string[]>;
    /**
     * Fetch available models from OpenRouter API /api/v1/models endpoint
     * @private
     */
    private fetchModelsFromAPI;
    /**
     * Type guard to validate the models API response structure
     * @private
     */
    private isValidModelsResponse;
    /**
     * Fetch and cache model capabilities from OpenRouter API
     * Call this to enable accurate tool support detection
     */
    cacheModelCapabilities(): Promise<void>;
}