@juspay/neurolink 8.23.2 β†’ 8.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/CHANGELOG.md +12 -0
  2. package/README.md +6 -4
  3. package/dist/adapters/providerImageAdapter.js +37 -0
  4. package/dist/cli/commands/setup.js +50 -0
  5. package/dist/cli/factories/commandFactory.js +3 -0
  6. package/dist/constants/enums.d.ts +19 -0
  7. package/dist/constants/enums.js +25 -0
  8. package/dist/core/baseProvider.d.ts +2 -10
  9. package/dist/core/baseProvider.js +1 -37
  10. package/dist/core/modules/TelemetryHandler.d.ts +3 -2
  11. package/dist/core/modules/TelemetryHandler.js +4 -2
  12. package/dist/factories/providerRegistry.js +6 -1
  13. package/dist/lib/adapters/providerImageAdapter.js +37 -0
  14. package/dist/lib/constants/enums.d.ts +19 -0
  15. package/dist/lib/constants/enums.js +25 -0
  16. package/dist/lib/core/baseProvider.d.ts +2 -10
  17. package/dist/lib/core/baseProvider.js +1 -37
  18. package/dist/lib/core/modules/TelemetryHandler.d.ts +3 -2
  19. package/dist/lib/core/modules/TelemetryHandler.js +4 -2
  20. package/dist/lib/factories/providerRegistry.js +6 -1
  21. package/dist/lib/providers/anthropic.js +1 -1
  22. package/dist/lib/providers/azureOpenai.js +1 -1
  23. package/dist/lib/providers/googleAiStudio.js +1 -1
  24. package/dist/lib/providers/googleVertex.js +1 -1
  25. package/dist/lib/providers/openAI.js +1 -1
  26. package/dist/lib/providers/openRouter.d.ts +56 -0
  27. package/dist/lib/providers/openRouter.js +507 -0
  28. package/dist/lib/types/common.d.ts +1 -0
  29. package/dist/lib/types/index.d.ts +1 -1
  30. package/dist/lib/types/providers.d.ts +46 -0
  31. package/dist/lib/utils/pdfProcessor.js +14 -0
  32. package/dist/providers/anthropic.js +1 -1
  33. package/dist/providers/azureOpenai.js +1 -1
  34. package/dist/providers/googleAiStudio.js +1 -1
  35. package/dist/providers/googleVertex.js +1 -1
  36. package/dist/providers/openAI.js +1 -1
  37. package/dist/providers/openRouter.d.ts +56 -0
  38. package/dist/providers/openRouter.js +506 -0
  39. package/dist/types/common.d.ts +1 -0
  40. package/dist/types/index.d.ts +1 -1
  41. package/dist/types/providers.d.ts +46 -0
  42. package/dist/utils/pdfProcessor.js +14 -0
  43. package/package.json +4 -3
package/CHANGELOG.md CHANGED
@@ -1,3 +1,15 @@
1
+ ## [8.25.0](https://github.com/juspay/neurolink/compare/v8.24.0...v8.25.0) (2025-12-30)
2
+
3
+ ### Features
4
+
5
+ - **(observability):** Add support for custom metadata in Context ([b175249](https://github.com/juspay/neurolink/commit/b175249c61357b0e6d127932bd7824d0bfe6f2ed))
6
+
7
+ ## [8.24.0](https://github.com/juspay/neurolink/compare/v8.23.2...v8.24.0) (2025-12-28)
8
+
9
+ ### Features
10
+
11
+ - **(openrouter):** add OpenRouter provider with 300+ model support ([563611f](https://github.com/juspay/neurolink/commit/563611f84c154e2966aebb6e8a414fcb60a26fd3)), closes [#608](https://github.com/juspay/neurolink/issues/608)
12
+
1
13
  ## [8.23.2](https://github.com/juspay/neurolink/compare/v8.23.1...v8.23.2) (2025-12-27)
2
14
 
3
15
  ### Bug Fixes
package/README.md CHANGED
@@ -11,9 +11,9 @@ Enterprise AI development platform with unified provider access, production-read
11
11
 
12
12
  ## 🧠 What is NeuroLink?
13
13
 
14
- **NeuroLink is the universal AI integration platform that unifies 12 major AI providers and 100+ models under one consistent API.**
14
+ **NeuroLink is the universal AI integration platform that unifies 13 major AI providers and 100+ models under one consistent API.**
15
15
 
16
- Extracted from production systems at Juspay and battle-tested at enterprise scale, NeuroLink provides a production-ready solution for integrating AI into any application. Whether you're building with OpenAI, Anthropic, Google, AWS Bedrock, Azure, or any of our 12 supported providers, NeuroLink gives you a single, consistent interface that works everywhere.
16
+ Extracted from production systems at Juspay and battle-tested at enterprise scale, NeuroLink provides a production-ready solution for integrating AI into any application. Whether you're building with OpenAI, Anthropic, Google, AWS Bedrock, Azure, or any of our 13 supported providers, NeuroLink gives you a single, consistent interface that works everywhere.
17
17
 
18
18
  **Why NeuroLink?** Switch providers with a single parameter change, leverage 64+ built-in tools and MCP servers, deploy with confidence using enterprise features like Redis memory and multi-provider failover, and optimize costs automatically with intelligent routing. Use it via our professional CLI or TypeScript SDKβ€”whichever fits your workflow.
19
19
 
@@ -30,6 +30,7 @@ Extracted from production systems at Juspay and battle-tested at enterprise scal
30
30
  - **PDF File Support** – Process PDF documents with native visual analysis for Vertex AI, Anthropic, Bedrock, AI Studio. β†’ [PDF Guide](docs/features/pdf-support.md)
31
31
  - **LiteLLM Integration** – Access 100+ AI models from all major providers through unified interface. β†’ [Setup Guide](docs/LITELLM-INTEGRATION.md)
32
32
  - **SageMaker Integration** – Deploy and use custom trained models on AWS infrastructure. β†’ [Setup Guide](docs/SAGEMAKER-INTEGRATION.md)
33
+ - **OpenRouter Integration** – Access 300+ models from OpenAI, Anthropic, Google, Meta, and more through a single unified API. β†’ [Setup Guide](docs/getting-started/providers/openrouter.md)
33
34
  - **Human-in-the-loop workflows** – Pause generation for user approval/input before tool execution. β†’ [HITL Guide](docs/features/hitl.md)
34
35
  - **Guardrails middleware** – Block PII, profanity, and unsafe content with built-in filtering. β†’ [Guardrails Guide](docs/features/guardrails.md)
35
36
  - **Context summarization** – Automatic conversation compression for long-running sessions. β†’ [Summarization Guide](docs/CONTEXT-SUMMARIZATION.md)
@@ -55,7 +56,7 @@ NeuroLink is a comprehensive AI development platform. Every feature below is pro
55
56
 
56
57
  ### πŸ€– AI Provider Integration
57
58
 
58
- **12 providers unified under one API** - Switch providers with a single parameter change.
59
+ **13 providers unified under one API** - Switch providers with a single parameter change.
59
60
 
60
61
  | Provider | Models | Free Tier | Tool Support | Status | Documentation |
61
62
  | --------------------- | ------------------------------ | --------------- | ------------ | ------------- | ----------------------------------------------------------------------- |
@@ -66,6 +67,7 @@ NeuroLink is a comprehensive AI development platform. Every feature below is pro
66
67
  | **Google Vertex** | Gemini via GCP | ❌ | βœ… Full | βœ… Production | [Setup Guide](docs/getting-started/provider-setup.md#vertex) |
67
68
  | **Azure OpenAI** | GPT-4, GPT-4o, o1 | ❌ | βœ… Full | βœ… Production | [Setup Guide](docs/getting-started/provider-setup.md#azure) |
68
69
  | **LiteLLM** | 100+ models unified | Varies | βœ… Full | βœ… Production | [Setup Guide](docs/LITELLM-INTEGRATION.md) |
70
+ | **OpenRouter** | 300+ models unified | Varies | βœ… Full | βœ… Production | [Setup Guide](docs/getting-started/provider-setup.md#openrouter) |
69
71
  | **AWS SageMaker** | Custom deployed models | ❌ | βœ… Full | βœ… Production | [Setup Guide](docs/SAGEMAKER-INTEGRATION.md) |
70
72
  | **Mistral AI** | Mistral Large, Small | βœ… Free Tier | βœ… Full | βœ… Production | [Setup Guide](docs/getting-started/provider-setup.md#mistral) |
71
73
  | **Hugging Face** | 100,000+ models | βœ… Free | ⚠️ Partial | βœ… Production | [Setup Guide](docs/getting-started/provider-setup.md#huggingface) |
@@ -297,7 +299,7 @@ Full command and API breakdown lives in [`docs/cli/commands.md`](docs/cli/comman
297
299
 
298
300
  | Capability | Highlights |
299
301
  | ------------------------ | ------------------------------------------------------------------------------------------------------------------------ |
300
- | **Provider unification** | 12+ providers with automatic fallback, cost-aware routing, provider orchestration (Q3). |
302
+ | **Provider unification** | 13+ providers with automatic fallback, cost-aware routing, provider orchestration (Q3). |
301
303
  | **Multimodal pipeline** | Stream images + CSV data + PDF documents across providers with local/remote assets. Auto-detection for mixed file types. |
302
304
  | **Quality & governance** | Auto-evaluation engine (Q3), guardrails middleware (Q4), HITL workflows (Q4), audit logging. |
303
305
  | **Memory & context** | Conversation memory, Mem0 integration, Redis history export (Q4), context summarization (Q4). |
@@ -37,6 +37,7 @@ const IMAGE_LIMITS = {
37
37
  mistral: 10, // Conservative limit for Mistral
38
38
  // Note: Bedrock limit defined for future use when vision support is added
39
39
  bedrock: 20, // Same as Anthropic for Claude models on Bedrock
40
+ openrouter: 10, // Conservative limit, routes to various underlying providers
40
41
  };
41
42
  /**
42
43
  * Vision capability definitions for each provider
@@ -231,6 +232,37 @@ const VISION_CAPABILITIES = {
231
232
  // Groq models via LiteLLM (vision)
232
233
  "groq/llama-3.2-11b-vision-preview",
233
234
  ],
235
+ openrouter: [
236
+ // OpenRouter provides access to vision-capable models from multiple providers
237
+ // Anthropic Claude models (via OpenRouter)
238
+ "anthropic/claude-3-5-sonnet",
239
+ "anthropic/claude-3-5-haiku",
240
+ "anthropic/claude-3-opus",
241
+ "anthropic/claude-3-sonnet",
242
+ "anthropic/claude-3-haiku",
243
+ // OpenAI models (via OpenRouter)
244
+ "openai/gpt-4o",
245
+ "openai/gpt-4o-mini",
246
+ "openai/gpt-4-turbo",
247
+ "openai/gpt-4-vision-preview",
248
+ // Google models (via OpenRouter)
249
+ "google/gemini-2.5-pro",
250
+ "google/gemini-2.5-flash",
251
+ "google/gemini-2.0-flash",
252
+ "google/gemini-2.0-flash-001",
253
+ "google/gemini-1.5-pro",
254
+ "google/gemini-1.5-flash",
255
+ "google/gemini-pro-vision",
256
+ // Meta Llama models (vision-capable via OpenRouter)
257
+ "meta-llama/llama-3.2-90b-vision-instruct",
258
+ "meta-llama/llama-3.2-11b-vision-instruct",
259
+ // Pixtral/Mistral models (via OpenRouter)
260
+ "mistralai/pixtral-12b",
261
+ "mistralai/pixtral-large",
262
+ // Qwen models (via OpenRouter)
263
+ "qwen/qwen-2-vl-72b-instruct",
264
+ "qwen/qwen-2-vl-7b-instruct",
265
+ ],
234
266
  mistral: [
235
267
  // Mistral Large (latest has vision via Pixtral integration)
236
268
  "mistral-large-latest",
@@ -430,6 +462,11 @@ export class ProviderImageAdapter {
430
462
  this.validateImageCount(images.length, "bedrock");
431
463
  adaptedPayload = this.formatForAnthropic(text, images, true);
432
464
  break;
465
+ case "openrouter":
466
+ // OpenRouter routes to underlying providers, use OpenAI format
467
+ this.validateImageCount(images.length, "openrouter");
468
+ adaptedPayload = this.formatForOpenAI(text, images);
469
+ break;
433
470
  default:
434
471
  throw new Error(`Vision not supported for provider: ${provider}`);
435
472
  }
@@ -125,6 +125,19 @@ const PROVIDERS = [
125
125
  pricing: "Free tier β†’ €7 per 1M tokens",
126
126
  setupCommand: "neurolink setup --provider mistral",
127
127
  },
128
+ {
129
+ id: "openrouter",
130
+ name: "OpenRouter",
131
+ emoji: "πŸ”€",
132
+ description: "100+ models, unified API access",
133
+ setupTime: "2 min",
134
+ cost: "Pay-per-use",
135
+ bestFor: "Model variety",
136
+ models: "GPT-4, Claude, Gemini, Llama, Mixtral, etc.",
137
+ strengths: "Access 100+ models via single API, model routing, fallbacks",
138
+ pricing: "Per-model pricing, free tier available",
139
+ setupCommand: "neurolink setup --provider openrouter",
140
+ },
128
141
  ];
129
142
  /**
130
143
  * Main setup command handler
@@ -270,6 +283,9 @@ async function checkExistingConfigurations() {
270
283
  if (process.env.MISTRAL_API_KEY) {
271
284
  configured.push("mistral");
272
285
  }
286
+ if (process.env.OPENROUTER_API_KEY) {
287
+ configured.push("openrouter");
288
+ }
273
289
  return configured;
274
290
  }
275
291
  /**
@@ -381,12 +397,46 @@ async function delegateToProviderSetup(providerId) {
381
397
  case "mistral":
382
398
  await handleMistralSetup(setupArgs);
383
399
  break;
400
+ case "openrouter":
401
+ await handleOpenRouterSetup();
402
+ break;
384
403
  default:
385
404
  throw new Error(`Unknown provider: ${providerId}`);
386
405
  }
387
406
  // After successful setup, show completion message
388
407
  await showSetupCompletion(providerId);
389
408
  }
409
+ /**
410
+ * Handle OpenRouter provider setup
411
+ */
412
+ async function handleOpenRouterSetup() {
413
+ logger.always("");
414
+ logger.always(chalk.blue("πŸ”€ OpenRouter Setup"));
415
+ logger.always("");
416
+ logger.always("OpenRouter provides unified access to 100+ AI models from multiple providers");
417
+ logger.always("including OpenAI, Anthropic, Google, Meta, Mistral, and more.");
418
+ logger.always("");
419
+ logger.always(chalk.yellow("Step 1: Get your API key"));
420
+ logger.always(" 1. Go to https://openrouter.ai/keys");
421
+ logger.always(" 2. Sign in or create an account");
422
+ logger.always(" 3. Create a new API key");
423
+ logger.always("");
424
+ logger.always(chalk.yellow("Step 2: Set the environment variable"));
425
+ logger.always(" Add to your shell profile (~/.bashrc, ~/.zshrc, etc.):");
426
+ logger.always("");
427
+ logger.always(chalk.cyan(" export OPENROUTER_API_KEY=your_api_key_here"));
428
+ logger.always("");
429
+ logger.always(chalk.yellow("Step 3: Test the configuration"));
430
+ logger.always(chalk.cyan(' neurolink generate "Hello!" --provider openrouter --model google/gemini-2.0-flash-exp:free'));
431
+ logger.always("");
432
+ logger.always(chalk.green("Available models include:"));
433
+ logger.always(" β€’ anthropic/claude-3.5-sonnet - Best for analysis");
434
+ logger.always(" β€’ openai/gpt-4o - Industry standard");
435
+ logger.always(" β€’ google/gemini-2.0-flash-exp:free - Free tier");
436
+ logger.always(" β€’ meta-llama/llama-3.1-70b-instruct - Open source");
437
+ logger.always("");
438
+ logger.always(chalk.gray("See all models at: https://openrouter.ai/models"));
439
+ }
390
440
  /**
391
441
  * Show setup completion message
392
442
  */
@@ -29,6 +29,8 @@ export class CLICommandFactory {
29
29
  "auto",
30
30
  "openai",
31
31
  "openai-compatible",
32
+ "openrouter",
33
+ "or",
32
34
  "bedrock",
33
35
  "vertex",
34
36
  "googleVertex",
@@ -797,6 +799,7 @@ export class CLICommandFactory {
797
799
  choices: [
798
800
  "google-ai",
799
801
  "openai",
802
+ "openrouter",
800
803
  "anthropic",
801
804
  "azure",
802
805
  "bedrock",
@@ -5,6 +5,7 @@ export declare enum AIProviderName {
5
5
  BEDROCK = "bedrock",
6
6
  OPENAI = "openai",
7
7
  OPENAI_COMPATIBLE = "openai-compatible",
8
+ OPENROUTER = "openrouter",
8
9
  VERTEX = "vertex",
9
10
  ANTHROPIC = "anthropic",
10
11
  AZURE = "azure",
@@ -16,6 +17,24 @@ export declare enum AIProviderName {
16
17
  SAGEMAKER = "sagemaker",
17
18
  AUTO = "auto"
18
19
  }
20
+ /**
21
+ * Popular Models for OpenRouter (300+ available at openrouter.ai/models)
22
+ * OpenRouter uses 'provider/model' format
23
+ */
24
+ export declare enum OpenRouterModels {
25
+ CLAUDE_3_5_SONNET = "anthropic/claude-3-5-sonnet",
26
+ CLAUDE_3_5_HAIKU = "anthropic/claude-3-5-haiku",
27
+ CLAUDE_3_OPUS = "anthropic/claude-3-opus",
28
+ GPT_4O = "openai/gpt-4o",
29
+ GPT_4O_MINI = "openai/gpt-4o-mini",
30
+ GPT_4_TURBO = "openai/gpt-4-turbo",
31
+ GEMINI_2_0_FLASH = "google/gemini-2.0-flash",
32
+ GEMINI_1_5_PRO = "google/gemini-1.5-pro",
33
+ LLAMA_3_1_70B = "meta-llama/llama-3.1-70b-instruct",
34
+ LLAMA_3_1_8B = "meta-llama/llama-3.1-8b-instruct",
35
+ MISTRAL_LARGE = "mistralai/mistral-large",
36
+ MIXTRAL_8X7B = "mistralai/mixtral-8x7b-instruct"
37
+ }
19
38
  /**
20
39
  * Supported Models for Amazon Bedrock
21
40
  */
@@ -9,6 +9,7 @@ export var AIProviderName;
9
9
  AIProviderName["BEDROCK"] = "bedrock";
10
10
  AIProviderName["OPENAI"] = "openai";
11
11
  AIProviderName["OPENAI_COMPATIBLE"] = "openai-compatible";
12
+ AIProviderName["OPENROUTER"] = "openrouter";
12
13
  AIProviderName["VERTEX"] = "vertex";
13
14
  AIProviderName["ANTHROPIC"] = "anthropic";
14
15
  AIProviderName["AZURE"] = "azure";
@@ -20,6 +21,30 @@ export var AIProviderName;
20
21
  AIProviderName["SAGEMAKER"] = "sagemaker";
21
22
  AIProviderName["AUTO"] = "auto";
22
23
  })(AIProviderName || (AIProviderName = {}));
24
+ /**
25
+ * Popular Models for OpenRouter (300+ available at openrouter.ai/models)
26
+ * OpenRouter uses 'provider/model' format
27
+ */
28
+ export var OpenRouterModels;
29
+ (function (OpenRouterModels) {
30
+ // Anthropic Claude models
31
+ OpenRouterModels["CLAUDE_3_5_SONNET"] = "anthropic/claude-3-5-sonnet";
32
+ OpenRouterModels["CLAUDE_3_5_HAIKU"] = "anthropic/claude-3-5-haiku";
33
+ OpenRouterModels["CLAUDE_3_OPUS"] = "anthropic/claude-3-opus";
34
+ // OpenAI models
35
+ OpenRouterModels["GPT_4O"] = "openai/gpt-4o";
36
+ OpenRouterModels["GPT_4O_MINI"] = "openai/gpt-4o-mini";
37
+ OpenRouterModels["GPT_4_TURBO"] = "openai/gpt-4-turbo";
38
+ // Google models
39
+ OpenRouterModels["GEMINI_2_0_FLASH"] = "google/gemini-2.0-flash";
40
+ OpenRouterModels["GEMINI_1_5_PRO"] = "google/gemini-1.5-pro";
41
+ // Meta Llama models
42
+ OpenRouterModels["LLAMA_3_1_70B"] = "meta-llama/llama-3.1-70b-instruct";
43
+ OpenRouterModels["LLAMA_3_1_8B"] = "meta-llama/llama-3.1-8b-instruct";
44
+ // Mistral models
45
+ OpenRouterModels["MISTRAL_LARGE"] = "mistralai/mistral-large";
46
+ OpenRouterModels["MIXTRAL_8X7B"] = "mistralai/mixtral-8x7b-instruct";
47
+ })(OpenRouterModels || (OpenRouterModels = {}));
23
48
  /**
24
49
  * Supported Models for Amazon Bedrock
25
50
  */
@@ -7,6 +7,7 @@ import type { MiddlewareFactoryOptions } from "../types/middlewareTypes.js";
7
7
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
8
8
  import type { UnknownRecord } from "../types/common.js";
9
9
  import type { NeuroLink } from "../neurolink.js";
10
+ import { TelemetryHandler } from "./modules/TelemetryHandler.js";
10
11
  /**
11
12
  * Abstract base class for all AI providers
12
13
  * Tools are integrated as first-class citizens - always available by default
@@ -26,7 +27,7 @@ export declare abstract class BaseProvider implements AIProvider {
26
27
  private readonly messageBuilder;
27
28
  private readonly streamHandler;
28
29
  private readonly generationHandler;
29
- private readonly telemetryHandler;
30
+ protected readonly telemetryHandler: TelemetryHandler;
30
31
  private readonly utilities;
31
32
  private readonly toolsManager;
32
33
  constructor(modelName?: string, providerName?: AIProviderName, neurolink?: NeuroLink, middleware?: MiddlewareFactoryOptions);
@@ -260,13 +261,4 @@ export declare abstract class BaseProvider implements AIProvider {
260
261
  * @returns Array of prompt chunks
261
262
  */
262
263
  static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
263
- /**
264
- * Create telemetry configuration for Vercel AI SDK experimental_telemetry
265
- * This enables automatic OpenTelemetry tracing when telemetry is enabled
266
- */
267
- protected getStreamTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
268
- isEnabled: boolean;
269
- functionId?: string;
270
- metadata?: Record<string, string | number | boolean>;
271
- } | undefined;
272
264
  }
@@ -48,7 +48,7 @@ export class BaseProvider {
48
48
  // Initialize composition modules
49
49
  this.messageBuilder = new MessageBuilder(this.providerName, this.modelName);
50
50
  this.streamHandler = new StreamHandler(this.providerName, this.modelName);
51
- this.generationHandler = new GenerationHandler(this.providerName, this.modelName, () => this.supportsTools(), (options, type) => this.getStreamTelemetryConfig(options, type), (toolCalls, toolResults, options, timestamp) => this.handleToolExecutionStorage(toolCalls, toolResults, options, timestamp));
51
+ this.generationHandler = new GenerationHandler(this.providerName, this.modelName, () => this.supportsTools(), (options, type) => this.telemetryHandler.getTelemetryConfig(options, type), (toolCalls, toolResults, options, timestamp) => this.handleToolExecutionStorage(toolCalls, toolResults, options, timestamp));
52
52
  this.telemetryHandler = new TelemetryHandler(this.providerName, this.modelName, this.neurolink);
53
53
  this.utilities = new Utilities(this.providerName, this.modelName, this.defaultTimeout, this.middlewareOptions);
54
54
  this.toolsManager = new ToolsManager(this.providerName, this.directTools, this.neurolink, {
@@ -726,40 +726,4 @@ export class BaseProvider {
726
726
  }
727
727
  return chunks;
728
728
  }
729
- /**
730
- * Create telemetry configuration for Vercel AI SDK experimental_telemetry
731
- * This enables automatic OpenTelemetry tracing when telemetry is enabled
732
- */
733
- getStreamTelemetryConfig(options, operationType = "stream") {
734
- // Check if telemetry is enabled via NeuroLink observability config
735
- if (!this.neurolink?.isTelemetryEnabled()) {
736
- return undefined;
737
- }
738
- const context = options.context;
739
- const traceName = context?.traceName;
740
- const userId = context?.userId;
741
- const functionId = traceName ? traceName : userId ? userId : "guest";
742
- const metadata = {
743
- provider: this.providerName,
744
- model: this.modelName,
745
- toolsEnabled: !options.disableTools,
746
- neurolink: true,
747
- operationType,
748
- originalProvider: this.providerName,
749
- };
750
- // Add sessionId if available
751
- if ("sessionId" in options && options.sessionId) {
752
- const sessionId = options.sessionId;
753
- if (typeof sessionId === "string" ||
754
- typeof sessionId === "number" ||
755
- typeof sessionId === "boolean") {
756
- metadata.sessionId = sessionId;
757
- }
758
- }
759
- return {
760
- isEnabled: true,
761
- functionId,
762
- metadata,
763
- };
764
- }
765
729
  }
@@ -50,9 +50,10 @@ export declare class TelemetryHandler {
50
50
  totalTokens?: number;
51
51
  }): Promise<number>;
52
52
  /**
53
- * Get telemetry configuration for streaming/generation
53
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
54
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
54
55
  */
55
- getStreamTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
56
+ getTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
56
57
  isEnabled: boolean;
57
58
  functionId?: string;
58
59
  metadata?: Record<string, string | number | boolean>;
@@ -113,9 +113,10 @@ export class TelemetryHandler {
113
113
  }
114
114
  }
115
115
  /**
116
- * Get telemetry configuration for streaming/generation
116
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
117
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
117
118
  */
118
- getStreamTelemetryConfig(options, operationType = "stream") {
119
+ getTelemetryConfig(options, operationType = "stream") {
119
120
  // Check if telemetry is enabled via NeuroLink observability config
120
121
  if (!this.neurolink?.isTelemetryEnabled()) {
121
122
  return undefined;
@@ -125,6 +126,7 @@ export class TelemetryHandler {
125
126
  const userId = context?.userId;
126
127
  const functionId = traceName ? traceName : userId ? userId : "guest";
127
128
  const metadata = {
129
+ ...(context?.metadata || {}),
128
130
  provider: this.providerName,
129
131
  model: this.modelName,
130
132
  toolsEnabled: !options.disableTools,
@@ -81,7 +81,12 @@ export class ProviderRegistry {
81
81
  const { OpenAICompatibleProvider } = await import("../providers/openaiCompatible.js");
82
82
  return new OpenAICompatibleProvider(modelName, sdk);
83
83
  }, process.env.OPENAI_COMPATIBLE_MODEL || undefined, // Enable auto-discovery when no model specified
84
- ["openai-compatible", "openrouter", "vllm", "compatible"]);
84
+ ["openai-compatible", "vllm", "compatible"]);
85
+ // Register OpenRouter provider (300+ models from 60+ providers)
86
+ ProviderFactory.registerProvider(AIProviderName.OPENROUTER, async (modelName, _providerName, sdk) => {
87
+ const { OpenRouterProvider } = await import("../providers/openRouter.js");
88
+ return new OpenRouterProvider(modelName, sdk);
89
+ }, process.env.OPENROUTER_MODEL || "anthropic/claude-3-5-sonnet", ["openrouter", "or"]);
85
90
  // Register Amazon SageMaker provider
86
91
  ProviderFactory.registerProvider(AIProviderName.SAGEMAKER, async (modelName, _providerName, _sdk, region) => {
87
92
  const { AmazonSageMakerProvider } = await import("../providers/amazonSagemaker.js");
@@ -37,6 +37,7 @@ const IMAGE_LIMITS = {
37
37
  mistral: 10, // Conservative limit for Mistral
38
38
  // Note: Bedrock limit defined for future use when vision support is added
39
39
  bedrock: 20, // Same as Anthropic for Claude models on Bedrock
40
+ openrouter: 10, // Conservative limit, routes to various underlying providers
40
41
  };
41
42
  /**
42
43
  * Vision capability definitions for each provider
@@ -231,6 +232,37 @@ const VISION_CAPABILITIES = {
231
232
  // Groq models via LiteLLM (vision)
232
233
  "groq/llama-3.2-11b-vision-preview",
233
234
  ],
235
+ openrouter: [
236
+ // OpenRouter provides access to vision-capable models from multiple providers
237
+ // Anthropic Claude models (via OpenRouter)
238
+ "anthropic/claude-3-5-sonnet",
239
+ "anthropic/claude-3-5-haiku",
240
+ "anthropic/claude-3-opus",
241
+ "anthropic/claude-3-sonnet",
242
+ "anthropic/claude-3-haiku",
243
+ // OpenAI models (via OpenRouter)
244
+ "openai/gpt-4o",
245
+ "openai/gpt-4o-mini",
246
+ "openai/gpt-4-turbo",
247
+ "openai/gpt-4-vision-preview",
248
+ // Google models (via OpenRouter)
249
+ "google/gemini-2.5-pro",
250
+ "google/gemini-2.5-flash",
251
+ "google/gemini-2.0-flash",
252
+ "google/gemini-2.0-flash-001",
253
+ "google/gemini-1.5-pro",
254
+ "google/gemini-1.5-flash",
255
+ "google/gemini-pro-vision",
256
+ // Meta Llama models (vision-capable via OpenRouter)
257
+ "meta-llama/llama-3.2-90b-vision-instruct",
258
+ "meta-llama/llama-3.2-11b-vision-instruct",
259
+ // Pixtral/Mistral models (via OpenRouter)
260
+ "mistralai/pixtral-12b",
261
+ "mistralai/pixtral-large",
262
+ // Qwen models (via OpenRouter)
263
+ "qwen/qwen-2-vl-72b-instruct",
264
+ "qwen/qwen-2-vl-7b-instruct",
265
+ ],
234
266
  mistral: [
235
267
  // Mistral Large (latest has vision via Pixtral integration)
236
268
  "mistral-large-latest",
@@ -430,6 +462,11 @@ export class ProviderImageAdapter {
430
462
  this.validateImageCount(images.length, "bedrock");
431
463
  adaptedPayload = this.formatForAnthropic(text, images, true);
432
464
  break;
465
+ case "openrouter":
466
+ // OpenRouter routes to underlying providers, use OpenAI format
467
+ this.validateImageCount(images.length, "openrouter");
468
+ adaptedPayload = this.formatForOpenAI(text, images);
469
+ break;
433
470
  default:
434
471
  throw new Error(`Vision not supported for provider: ${provider}`);
435
472
  }
@@ -5,6 +5,7 @@ export declare enum AIProviderName {
5
5
  BEDROCK = "bedrock",
6
6
  OPENAI = "openai",
7
7
  OPENAI_COMPATIBLE = "openai-compatible",
8
+ OPENROUTER = "openrouter",
8
9
  VERTEX = "vertex",
9
10
  ANTHROPIC = "anthropic",
10
11
  AZURE = "azure",
@@ -16,6 +17,24 @@ export declare enum AIProviderName {
16
17
  SAGEMAKER = "sagemaker",
17
18
  AUTO = "auto"
18
19
  }
20
+ /**
21
+ * Popular Models for OpenRouter (300+ available at openrouter.ai/models)
22
+ * OpenRouter uses 'provider/model' format
23
+ */
24
+ export declare enum OpenRouterModels {
25
+ CLAUDE_3_5_SONNET = "anthropic/claude-3-5-sonnet",
26
+ CLAUDE_3_5_HAIKU = "anthropic/claude-3-5-haiku",
27
+ CLAUDE_3_OPUS = "anthropic/claude-3-opus",
28
+ GPT_4O = "openai/gpt-4o",
29
+ GPT_4O_MINI = "openai/gpt-4o-mini",
30
+ GPT_4_TURBO = "openai/gpt-4-turbo",
31
+ GEMINI_2_0_FLASH = "google/gemini-2.0-flash",
32
+ GEMINI_1_5_PRO = "google/gemini-1.5-pro",
33
+ LLAMA_3_1_70B = "meta-llama/llama-3.1-70b-instruct",
34
+ LLAMA_3_1_8B = "meta-llama/llama-3.1-8b-instruct",
35
+ MISTRAL_LARGE = "mistralai/mistral-large",
36
+ MIXTRAL_8X7B = "mistralai/mixtral-8x7b-instruct"
37
+ }
19
38
  /**
20
39
  * Supported Models for Amazon Bedrock
21
40
  */
@@ -9,6 +9,7 @@ export var AIProviderName;
9
9
  AIProviderName["BEDROCK"] = "bedrock";
10
10
  AIProviderName["OPENAI"] = "openai";
11
11
  AIProviderName["OPENAI_COMPATIBLE"] = "openai-compatible";
12
+ AIProviderName["OPENROUTER"] = "openrouter";
12
13
  AIProviderName["VERTEX"] = "vertex";
13
14
  AIProviderName["ANTHROPIC"] = "anthropic";
14
15
  AIProviderName["AZURE"] = "azure";
@@ -20,6 +21,30 @@ export var AIProviderName;
20
21
  AIProviderName["SAGEMAKER"] = "sagemaker";
21
22
  AIProviderName["AUTO"] = "auto";
22
23
  })(AIProviderName || (AIProviderName = {}));
24
+ /**
25
+ * Popular Models for OpenRouter (300+ available at openrouter.ai/models)
26
+ * OpenRouter uses 'provider/model' format
27
+ */
28
+ export var OpenRouterModels;
29
+ (function (OpenRouterModels) {
30
+ // Anthropic Claude models
31
+ OpenRouterModels["CLAUDE_3_5_SONNET"] = "anthropic/claude-3-5-sonnet";
32
+ OpenRouterModels["CLAUDE_3_5_HAIKU"] = "anthropic/claude-3-5-haiku";
33
+ OpenRouterModels["CLAUDE_3_OPUS"] = "anthropic/claude-3-opus";
34
+ // OpenAI models
35
+ OpenRouterModels["GPT_4O"] = "openai/gpt-4o";
36
+ OpenRouterModels["GPT_4O_MINI"] = "openai/gpt-4o-mini";
37
+ OpenRouterModels["GPT_4_TURBO"] = "openai/gpt-4-turbo";
38
+ // Google models
39
+ OpenRouterModels["GEMINI_2_0_FLASH"] = "google/gemini-2.0-flash";
40
+ OpenRouterModels["GEMINI_1_5_PRO"] = "google/gemini-1.5-pro";
41
+ // Meta Llama models
42
+ OpenRouterModels["LLAMA_3_1_70B"] = "meta-llama/llama-3.1-70b-instruct";
43
+ OpenRouterModels["LLAMA_3_1_8B"] = "meta-llama/llama-3.1-8b-instruct";
44
+ // Mistral models
45
+ OpenRouterModels["MISTRAL_LARGE"] = "mistralai/mistral-large";
46
+ OpenRouterModels["MIXTRAL_8X7B"] = "mistralai/mixtral-8x7b-instruct";
47
+ })(OpenRouterModels || (OpenRouterModels = {}));
23
48
  /**
24
49
  * Supported Models for Amazon Bedrock
25
50
  */
@@ -7,6 +7,7 @@ import type { MiddlewareFactoryOptions } from "../types/middlewareTypes.js";
7
7
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
8
8
  import type { UnknownRecord } from "../types/common.js";
9
9
  import type { NeuroLink } from "../neurolink.js";
10
+ import { TelemetryHandler } from "./modules/TelemetryHandler.js";
10
11
  /**
11
12
  * Abstract base class for all AI providers
12
13
  * Tools are integrated as first-class citizens - always available by default
@@ -26,7 +27,7 @@ export declare abstract class BaseProvider implements AIProvider {
26
27
  private readonly messageBuilder;
27
28
  private readonly streamHandler;
28
29
  private readonly generationHandler;
29
- private readonly telemetryHandler;
30
+ protected readonly telemetryHandler: TelemetryHandler;
30
31
  private readonly utilities;
31
32
  private readonly toolsManager;
32
33
  constructor(modelName?: string, providerName?: AIProviderName, neurolink?: NeuroLink, middleware?: MiddlewareFactoryOptions);
@@ -260,13 +261,4 @@ export declare abstract class BaseProvider implements AIProvider {
260
261
  * @returns Array of prompt chunks
261
262
  */
262
263
  static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
263
- /**
264
- * Create telemetry configuration for Vercel AI SDK experimental_telemetry
265
- * This enables automatic OpenTelemetry tracing when telemetry is enabled
266
- */
267
- protected getStreamTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
268
- isEnabled: boolean;
269
- functionId?: string;
270
- metadata?: Record<string, string | number | boolean>;
271
- } | undefined;
272
264
  }
@@ -48,7 +48,7 @@ export class BaseProvider {
48
48
  // Initialize composition modules
49
49
  this.messageBuilder = new MessageBuilder(this.providerName, this.modelName);
50
50
  this.streamHandler = new StreamHandler(this.providerName, this.modelName);
51
- this.generationHandler = new GenerationHandler(this.providerName, this.modelName, () => this.supportsTools(), (options, type) => this.getStreamTelemetryConfig(options, type), (toolCalls, toolResults, options, timestamp) => this.handleToolExecutionStorage(toolCalls, toolResults, options, timestamp));
51
+ this.generationHandler = new GenerationHandler(this.providerName, this.modelName, () => this.supportsTools(), (options, type) => this.telemetryHandler.getTelemetryConfig(options, type), (toolCalls, toolResults, options, timestamp) => this.handleToolExecutionStorage(toolCalls, toolResults, options, timestamp));
52
52
  this.telemetryHandler = new TelemetryHandler(this.providerName, this.modelName, this.neurolink);
53
53
  this.utilities = new Utilities(this.providerName, this.modelName, this.defaultTimeout, this.middlewareOptions);
54
54
  this.toolsManager = new ToolsManager(this.providerName, this.directTools, this.neurolink, {
@@ -726,41 +726,5 @@ export class BaseProvider {
726
726
  }
727
727
  return chunks;
728
728
  }
729
- /**
730
- * Create telemetry configuration for Vercel AI SDK experimental_telemetry
731
- * This enables automatic OpenTelemetry tracing when telemetry is enabled
732
- */
733
- getStreamTelemetryConfig(options, operationType = "stream") {
734
- // Check if telemetry is enabled via NeuroLink observability config
735
- if (!this.neurolink?.isTelemetryEnabled()) {
736
- return undefined;
737
- }
738
- const context = options.context;
739
- const traceName = context?.traceName;
740
- const userId = context?.userId;
741
- const functionId = traceName ? traceName : userId ? userId : "guest";
742
- const metadata = {
743
- provider: this.providerName,
744
- model: this.modelName,
745
- toolsEnabled: !options.disableTools,
746
- neurolink: true,
747
- operationType,
748
- originalProvider: this.providerName,
749
- };
750
- // Add sessionId if available
751
- if ("sessionId" in options && options.sessionId) {
752
- const sessionId = options.sessionId;
753
- if (typeof sessionId === "string" ||
754
- typeof sessionId === "number" ||
755
- typeof sessionId === "boolean") {
756
- metadata.sessionId = sessionId;
757
- }
758
- }
759
- return {
760
- isEnabled: true,
761
- functionId,
762
- metadata,
763
- };
764
- }
765
729
  }
766
730
  //# sourceMappingURL=baseProvider.js.map
@@ -50,9 +50,10 @@ export declare class TelemetryHandler {
50
50
  totalTokens?: number;
51
51
  }): Promise<number>;
52
52
  /**
53
- * Get telemetry configuration for streaming/generation
53
+ * Create telemetry configuration for Vercel AI SDK experimental_telemetry
54
+ * This enables automatic OpenTelemetry tracing when telemetry is enabled
54
55
  */
55
- getStreamTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
56
+ getTelemetryConfig(options: StreamOptions | TextGenerationOptions, operationType?: "stream" | "generate"): {
56
57
  isEnabled: boolean;
57
58
  functionId?: string;
58
59
  metadata?: Record<string, string | number | boolean>;