@juspay/neurolink 7.5.0 → 7.6.1

package/CHANGELOG.md CHANGED
@@ -1,9 +1,22 @@
- # [7.5.0](https://github.com/juspay/neurolink/compare/v7.4.0...v7.5.0) (2025-08-06)
+ ## [7.6.1](https://github.com/juspay/neurolink/compare/v7.6.0...v7.6.1) (2025-08-09)
+
+
+ ### Bug Fixes
+
+ * **docs:** resolve documentation deployment and broken links ([e78d7e8](https://github.com/juspay/neurolink/commit/e78d7e8da6ff16ee266a88beec70a26b67145da2))
+
+ # [7.6.0](https://github.com/juspay/neurolink/compare/v7.5.0...v7.6.0) (2025-08-09)


  ### Features

- * **providers:** add LiteLLM provider integration with access to 100+ AI models ([8918f8e](https://github.com/juspay/neurolink/commit/8918f8efc853a2fa42b75838259b22d8022f02b3))
+ * **openai-compatible:** add OpenAI Compatible provider with intelligent model auto-discovery ([3041d26](https://github.com/juspay/neurolink/commit/3041d26fb33881e5962cb1f13d3d06f021f642f2))
+
+ # [7.5.0](https://github.com/juspay/neurolink/compare/v7.4.0...v7.5.0) (2025-08-06)
+
+ ### Features
+
+ - **providers:** add LiteLLM provider integration with access to 100+ AI models ([8918f8e](https://github.com/juspay/neurolink/commit/8918f8efc853a2fa42b75838259b22d8022f02b3))

  # [7.4.0](https://github.com/juspay/neurolink/compare/v7.3.8...v7.4.0) (2025-08-06)

package/README.md CHANGED
@@ -3,7 +3,7 @@
  [![NPM Version](https://img.shields.io/npm/v/@juspay/neurolink)](https://www.npmjs.com/package/@juspay/neurolink)
  [![Downloads](https://img.shields.io/npm/dm/@juspay/neurolink)](https://www.npmjs.com/package/@juspay/neurolink)
  [![GitHub Stars](https://img.shields.io/github/stars/juspay/neurolink)](https://github.com/juspay/neurolink/stargazers)
- [![License](https://img.shields.io/npm/l/@juspay/neurolink)](https://github.com/juspay/neurolink/blob/main/LICENSE)
+ [![License](https://img.shields.io/npm/l/@juspay/neurolink)](https://github.com/juspay/neurolink/blob/release/LICENSE)
  [![TypeScript](https://img.shields.io/badge/TypeScript-Ready-blue)](https://www.typescriptlang.org/)
  [![CI](https://github.com/juspay/neurolink/workflows/CI/badge.svg)](https://github.com/juspay/neurolink/actions)

@@ -62,7 +62,17 @@ export LITELLM_API_KEY="sk-anything"
  npx @juspay/neurolink generate "Hello, AI" --provider litellm --model "openai/gpt-4o"
  npx @juspay/neurolink generate "Hello, AI" --provider litellm --model "anthropic/claude-3-5-sonnet"

- # Option 2: Direct Provider - Quick setup with Google AI Studio (free tier)
+ # Option 2: OpenAI Compatible - Use any OpenAI-compatible endpoint with auto-discovery
+ export OPENAI_COMPATIBLE_BASE_URL="https://api.openrouter.ai/api/v1"
+ export OPENAI_COMPATIBLE_API_KEY="sk-or-v1-your-api-key"
+ # Auto-discovers available models via /v1/models endpoint
+ npx @juspay/neurolink generate "Hello, AI" --provider openai-compatible
+
+ # Or specify a model explicitly
+ export OPENAI_COMPATIBLE_MODEL="claude-3-5-sonnet"
+ npx @juspay/neurolink generate "Hello, AI" --provider openai-compatible
+
+ # Option 3: Direct Provider - Quick setup with Google AI Studio (free tier)
  export GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"
  npx @juspay/neurolink generate "Hello, AI" --provider google-ai

@@ -215,9 +225,10 @@ npx @juspay/neurolink status
  ## ✨ Key Features

  - 🔗 **LiteLLM Integration** - **Access 100+ AI models** from all major providers through unified interface
+ - 🔍 **Smart Model Auto-Discovery** - OpenAI Compatible provider automatically detects available models via `/v1/models` endpoint
  - 🏭 **Factory Pattern Architecture** - Unified provider management with BaseProvider inheritance
  - 🔧 **Tools-First Design** - All providers automatically include 6 direct tools (getCurrentTime, readFile, listDirectory, calculateMath, writeFile, searchFiles)
- - 🔄 **10 AI Providers** - OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, Azure, **LiteLLM**, Hugging Face, Ollama, Mistral AI
+ - 🔄 **11 AI Providers** - OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, Azure, **LiteLLM**, **OpenAI Compatible**, Hugging Face, Ollama, Mistral AI
  - 💰 **Cost Optimization** - Automatic selection of cheapest models and LiteLLM routing
  - ⚡ **Automatic Fallback** - Never fail when providers are down, intelligent provider switching
  - 🖥️ **CLI + SDK** - Use from command line or integrate programmatically with TypeScript support
@@ -464,18 +475,19 @@ cd neurolink-demo && node server.js

  ## 🏗️ Supported Providers & Models

- | Provider             | Models                            | Auth Method        | Free Tier | Tool Support | Key Benefit          |
- | -------------------- | --------------------------------- | ------------------ | --------- | ------------ | -------------------- |
- | **🔗 LiteLLM** 🆕    | **100+ Models** (All Providers)   | Proxy Server       | Varies    | ✅ Full      | **Universal Access** |
- | **Google AI Studio** | Gemini 2.5 Flash/Pro              | API Key            |           | ✅ Full      | Free Tier Available  |
- | **OpenAI**           | GPT-4o, GPT-4o-mini               | API Key            |           | ✅ Full      | Industry Standard    |
- | **Anthropic**        | Claude 3.5 Sonnet                 | API Key            | ❌        | ✅ Full      | Advanced Reasoning   |
- | **Amazon Bedrock**   | Claude 3.5/3.7 Sonnet             | AWS Credentials    | ❌        | ✅ Full\*    | Enterprise Scale     |
- | **Google Vertex AI** | Gemini 2.5 Flash                  | Service Account    | ❌        | ✅ Full      | Enterprise Google    |
- | **Azure OpenAI**     | GPT-4, GPT-3.5                    | API Key + Endpoint | ❌        | ✅ Full      | Microsoft Ecosystem  |
- | **Ollama** 🆕        | Llama 3.2, Gemma, Mistral (Local) | None (Local)       |           | ⚠️ Partial   | Complete Privacy     |
- | **Hugging Face** 🆕  | 100,000+ open source models       | API Key            | ✅        | ⚠️ Partial   | Open Source          |
- | **Mistral AI** 🆕    | Tiny, Small, Medium, Large        | API Key            | ✅        | Full         | European/GDPR        |
+ | Provider                    | Models                             | Auth Method        | Free Tier | Tool Support | Key Benefit                      |
+ | --------------------------- | ---------------------------------- | ------------------ | --------- | ------------ | -------------------------------- |
+ | **🔗 LiteLLM** 🆕           | **100+ Models** (All Providers)    | Proxy Server       | Varies    | ✅ Full      | **Universal Access**             |
+ | **🔗 OpenAI Compatible** 🆕 | **Any OpenAI-compatible endpoint** | API Key + Base URL | Varies    | ✅ Full      | **Auto-Discovery + Flexibility** |
+ | **Google AI Studio**        | Gemini 2.5 Flash/Pro               | API Key            | ✅        | ✅ Full      | Free Tier Available              |
+ | **OpenAI**                  | GPT-4o, GPT-4o-mini                | API Key            | ❌        | ✅ Full      | Industry Standard                |
+ | **Anthropic**               | Claude 3.5 Sonnet                  | API Key            | ❌        | ✅ Full      | Advanced Reasoning               |
+ | **Amazon Bedrock**          | Claude 3.5/3.7 Sonnet              | AWS Credentials    | ❌        | ✅ Full\*    | Enterprise Scale                 |
+ | **Google Vertex AI**        | Gemini 2.5 Flash                   | Service Account    | ❌        | ✅ Full      | Enterprise Google                |
+ | **Azure OpenAI**            | GPT-4, GPT-3.5                     | API Key + Endpoint | ❌        | ✅ Full      | Microsoft Ecosystem              |
+ | **Ollama** 🆕               | Llama 3.2, Gemma, Mistral (Local)  | None (Local)       | ✅        | ⚠️ Partial   | Complete Privacy                 |
+ | **Hugging Face** 🆕         | 100,000+ open source models        | API Key            | ✅        | ⚠️ Partial   | Open Source                      |
+ | **Mistral AI** 🆕           | Tiny, Small, Medium, Large         | API Key            | ✅        | ✅ Full      | European/GDPR                    |

  **Tool Support Legend:**

@@ -486,6 +498,31 @@ cd neurolink-demo && node server.js

  **✨ Auto-Selection**: NeuroLink automatically chooses the best available provider based on speed, reliability, and configuration.

+ ### 🔍 Smart Model Auto-Discovery (OpenAI Compatible)
+
+ The OpenAI Compatible provider includes intelligent model discovery that automatically detects available models from any endpoint:
+
+ ```bash
+ # Setup - no model specified
+ export OPENAI_COMPATIBLE_BASE_URL="https://api.your-endpoint.ai/v1"
+ export OPENAI_COMPATIBLE_API_KEY="your-api-key"
+
+ # Auto-discovers and uses first available model
+ npx @juspay/neurolink generate "Hello!" --provider openai-compatible
+ # → 🔍 Auto-discovered model: claude-sonnet-4 from 3 available models
+
+ # Or specify explicitly to skip discovery
+ export OPENAI_COMPATIBLE_MODEL="gemini-2.5-pro"
+ npx @juspay/neurolink generate "Hello!" --provider openai-compatible
+ ```
+
+ **How it works:**
+
+ - Queries `/v1/models` endpoint to discover available models
+ - Automatically selects the first available model when none specified
+ - Falls back gracefully if discovery fails
+ - Works with any OpenAI-compatible service (OpenRouter, vLLM, LiteLLM, etc.)
+

  ## 🎯 Production Features

  ### Enterprise-Grade Reliability
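The README section above describes the discovery behavior that the compiled sources later in this diff implement. Before moving into those sources, here is a minimal standalone sketch of the documented flow in TypeScript; it assumes only the two environment variables introduced above and the OpenAI-style list response (`{ data: [{ id: string }] }`) that `/v1/models` conventionally returns — it is an illustration of the flow, not NeuroLink's own code.

```typescript
// Minimal sketch of the documented auto-discovery flow (not NeuroLink's code).
// Assumes OPENAI_COMPATIBLE_BASE_URL and OPENAI_COMPATIBLE_API_KEY are set.
async function discoverFirstModel(): Promise<string | undefined> {
  const baseURL = process.env.OPENAI_COMPATIBLE_BASE_URL;
  const apiKey = process.env.OPENAI_COMPATIBLE_API_KEY;
  if (!baseURL || !apiKey) {
    throw new Error("Set OPENAI_COMPATIBLE_BASE_URL and OPENAI_COMPATIBLE_API_KEY first");
  }
  // Query the models listing; any OpenAI-compatible service exposes this route.
  const response = await fetch(new URL("/v1/models", baseURL), {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  if (!response.ok) {
    return undefined; // caller falls back to an explicitly configured model
  }
  const body = (await response.json()) as { data?: Array<{ id: string }> };
  // Mirror the documented behavior: select the first advertised model id.
  return body.data?.[0]?.id;
}
```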
@@ -17,6 +17,7 @@ export class CLICommandFactory {
  choices: [
  "auto",
  "openai",
+ "openai-compatible",
  "bedrock",
  "vertex",
  "googleVertex",
@@ -153,6 +153,29 @@ export const PROVIDER_CONFIGS = [
  },
  ],
  },
+ {
+ id: AIProviderName.OPENAI_COMPATIBLE,
+ name: "OpenAI Compatible",
+ description: "Any OpenAI-compatible endpoint (OpenRouter, vLLM, etc.)",
+ envVars: [
+ {
+ key: "OPENAI_COMPATIBLE_BASE_URL",
+ prompt: "OpenAI-compatible endpoint URL (e.g., https://api.openrouter.ai/api/v1)",
+ secure: false,
+ },
+ {
+ key: "OPENAI_COMPATIBLE_API_KEY",
+ prompt: "API Key for the OpenAI-compatible service",
+ secure: true,
+ },
+ {
+ key: "OPENAI_COMPATIBLE_MODEL",
+ prompt: "Model name (optional - will auto-discover if not specified)",
+ secure: false,
+ optional: true,
+ },
+ ],
+ },
  ];
  /**
  * Run the interactive setup wizard
@@ -44,7 +44,7 @@ export class DynamicModelProvider {
  async initialize() {
  const sources = [
  process.env.MODEL_CONFIG_URL || "http://localhost:3001/api/v1/models",
- "https://raw.githubusercontent.com/sachinsharma92/neurolink/main/config/models.json",
+ `https://raw.githubusercontent.com/${process.env.MODEL_CONFIG_GITHUB_REPO || "juspay/neurolink"}/${process.env.MODEL_CONFIG_GITHUB_BRANCH || "release"}/config/models.json`,
  "./config/models.json", // Local fallback
  ];
  for (const source of sources) {
@@ -76,7 +76,7 @@ export class DynamicModelProvider {
  // Load from URL
  const response = await fetch(source, {
  headers: {
- "User-Agent": "NeuroLink/1.0 (+https://github.com/sachinsharma92/neurolink)",
+ "User-Agent": "NeuroLink/1.0 (+https://github.com/juspay/neurolink)",
  },
  });
  if (!response.ok) {
@@ -42,6 +42,7 @@ export interface TextGenerationResult {
  export declare enum AIProviderName {
  BEDROCK = "bedrock",
  OPENAI = "openai",
+ OPENAI_COMPATIBLE = "openai-compatible",
  VERTEX = "vertex",
  ANTHROPIC = "anthropic",
  AZURE = "azure",
@@ -5,6 +5,7 @@ export var AIProviderName;
  (function (AIProviderName) {
  AIProviderName["BEDROCK"] = "bedrock";
  AIProviderName["OPENAI"] = "openai";
+ AIProviderName["OPENAI_COMPATIBLE"] = "openai-compatible";
  AIProviderName["VERTEX"] = "vertex";
  AIProviderName["ANTHROPIC"] = "anthropic";
  AIProviderName["AZURE"] = "azure";
@@ -76,6 +76,12 @@ export class ProviderRegistry {
  const { LiteLLMProvider } = await import("../providers/litellm.js");
  return new LiteLLMProvider(modelName, sdk);
  }, process.env.LITELLM_MODEL || "openai/gpt-4o-mini", ["litellm"]);
+ // Register OpenAI Compatible provider
+ ProviderFactory.registerProvider(AIProviderName.OPENAI_COMPATIBLE, async (modelName, providerName, sdk) => {
+ const { OpenAICompatibleProvider } = await import("../providers/openaiCompatible.js");
+ return new OpenAICompatibleProvider(modelName, sdk);
+ }, process.env.OPENAI_COMPATIBLE_MODEL || undefined, // Enable auto-discovery when no model specified
+ ["openai-compatible", "openrouter", "vllm", "compatible"]);
  logger.debug("All providers registered successfully");
  this.registered = true;
  }
@@ -44,7 +44,7 @@ export class DynamicModelProvider {
  async initialize() {
  const sources = [
  process.env.MODEL_CONFIG_URL || "http://localhost:3001/api/v1/models",
- "https://raw.githubusercontent.com/sachinsharma92/neurolink/main/config/models.json",
+ `https://raw.githubusercontent.com/${process.env.MODEL_CONFIG_GITHUB_REPO || "juspay/neurolink"}/${process.env.MODEL_CONFIG_GITHUB_BRANCH || "release"}/config/models.json`,
  "./config/models.json", // Local fallback
  ];
  for (const source of sources) {
@@ -76,7 +76,7 @@ export class DynamicModelProvider {
  // Load from URL
  const response = await fetch(source, {
  headers: {
- "User-Agent": "NeuroLink/1.0 (+https://github.com/sachinsharma92/neurolink)",
+ "User-Agent": "NeuroLink/1.0 (+https://github.com/juspay/neurolink)",
  },
  });
  if (!response.ok) {
@@ -42,6 +42,7 @@ export interface TextGenerationResult {
  export declare enum AIProviderName {
  BEDROCK = "bedrock",
  OPENAI = "openai",
+ OPENAI_COMPATIBLE = "openai-compatible",
  VERTEX = "vertex",
  ANTHROPIC = "anthropic",
  AZURE = "azure",
@@ -5,6 +5,7 @@ export var AIProviderName;
  (function (AIProviderName) {
  AIProviderName["BEDROCK"] = "bedrock";
  AIProviderName["OPENAI"] = "openai";
+ AIProviderName["OPENAI_COMPATIBLE"] = "openai-compatible";
  AIProviderName["VERTEX"] = "vertex";
  AIProviderName["ANTHROPIC"] = "anthropic";
  AIProviderName["AZURE"] = "azure";
@@ -76,6 +76,12 @@ export class ProviderRegistry {
  const { LiteLLMProvider } = await import("../providers/litellm.js");
  return new LiteLLMProvider(modelName, sdk);
  }, process.env.LITELLM_MODEL || "openai/gpt-4o-mini", ["litellm"]);
+ // Register OpenAI Compatible provider
+ ProviderFactory.registerProvider(AIProviderName.OPENAI_COMPATIBLE, async (modelName, providerName, sdk) => {
+ const { OpenAICompatibleProvider } = await import("../providers/openaiCompatible.js");
+ return new OpenAICompatibleProvider(modelName, sdk);
+ }, process.env.OPENAI_COMPATIBLE_MODEL || undefined, // Enable auto-discovery when no model specified
+ ["openai-compatible", "openrouter", "vllm", "compatible"]);
  logger.debug("All providers registered successfully");
  this.registered = true;
  }
@@ -5,6 +5,7 @@
  export { GoogleVertexProvider as GoogleVertexAI } from "./googleVertex.js";
  export { AmazonBedrockProvider as AmazonBedrock } from "./amazonBedrock.js";
  export { OpenAIProvider as OpenAI } from "./openAI.js";
+ export { OpenAICompatibleProvider as OpenAICompatible } from "./openaiCompatible.js";
  export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
  export { AzureOpenAIProvider } from "./azureOpenai.js";
  export { GoogleAIStudioProvider as GoogleAIStudio } from "./googleAiStudio.js";
@@ -19,6 +20,7 @@ export declare const PROVIDERS: {
  readonly vertex: "GoogleVertexAI";
  readonly bedrock: "AmazonBedrock";
  readonly openai: "OpenAI";
+ readonly "openai-compatible": "OpenAICompatible";
  readonly anthropic: "AnthropicProvider";
  readonly azure: "AzureOpenAIProvider";
  readonly "google-ai": "GoogleAIStudio";
@@ -5,6 +5,7 @@
  export { GoogleVertexProvider as GoogleVertexAI } from "./googleVertex.js";
  export { AmazonBedrockProvider as AmazonBedrock } from "./amazonBedrock.js";
  export { OpenAIProvider as OpenAI } from "./openAI.js";
+ export { OpenAICompatibleProvider as OpenAICompatible } from "./openaiCompatible.js";
  export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
  export { AzureOpenAIProvider } from "./azureOpenai.js";
  export { GoogleAIStudioProvider as GoogleAIStudio } from "./googleAiStudio.js";
@@ -18,6 +19,7 @@ export const PROVIDERS = {
  vertex: "GoogleVertexAI",
  bedrock: "AmazonBedrock",
  openai: "OpenAI",
+ "openai-compatible": "OpenAICompatible",
  anthropic: "AnthropicProvider",
  azure: "AzureOpenAIProvider",
  "google-ai": "GoogleAIStudio",
@@ -0,0 +1,49 @@
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type Schema, type LanguageModelV1 } from "ai";
+ import type { AIProviderName } from "../core/types.js";
+ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
+ import { BaseProvider } from "../core/baseProvider.js";
+ /**
+ * OpenAI Compatible Provider - BaseProvider Implementation
+ * Provides access to any OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
+ */
+ export declare class OpenAICompatibleProvider extends BaseProvider {
+ private model?;
+ private config;
+ private discoveredModel?;
+ private customOpenAI;
+ constructor(modelName?: string, sdk?: unknown);
+ protected getProviderName(): AIProviderName;
+ protected getDefaultModel(): string;
+ /**
+ * Returns the Vercel AI SDK model instance for OpenAI Compatible endpoints
+ * Handles auto-discovery if no model was specified
+ */
+ protected getAISDKModel(): Promise<LanguageModelV1>;
+ protected handleProviderError(error: unknown): Error;
+ /**
+ * OpenAI Compatible endpoints support tools for compatible models
+ */
+ supportsTools(): boolean;
+ /**
+ * Provider-specific streaming implementation
+ * Note: This is only used when tools are disabled
+ */
+ protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+ /**
+ * Get available models from OpenAI Compatible endpoint
+ *
+ * Fetches from the /v1/models endpoint to discover available models.
+ * This is useful for auto-discovery when no model is specified.
+ */
+ getAvailableModels(): Promise<string[]>;
+ /**
+ * Get the first available model for auto-selection
+ */
+ getFirstAvailableModel(): Promise<string>;
+ /**
+ * Fallback models when discovery fails
+ */
+ private getFallbackModels;
+ private validateStreamOptions;
+ }
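The declaration file above fixes the provider's public surface: an optional model name in the constructor, plus `getAvailableModels()` and `getFirstAvailableModel()` for discovery. A hedged usage sketch follows; the import path is illustrative (the providers index in this diff re-exports the class as `OpenAICompatible`, but the package's public entry point may expose it differently), and both environment variables must be set or the constructor throws, per the implementation below.

```typescript
// Illustrative import path - adjust to the package's actual public entry point.
import { OpenAICompatible } from "@juspay/neurolink";

process.env.OPENAI_COMPATIBLE_BASE_URL = "https://api.your-endpoint.ai/v1";
process.env.OPENAI_COMPATIBLE_API_KEY = "your-api-key";

// No model argument: selection is deferred until first use, then
// auto-discovered via GET /v1/models on the configured endpoint.
const provider = new OpenAICompatible();
const models = await provider.getAvailableModels();    // string[] of model ids
const first = await provider.getFirstAvailableModel(); // first id, or the static fallback
console.log({ models, first });
```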
@@ -0,0 +1,260 @@
+ import { createOpenAI } from "@ai-sdk/openai";
+ import { streamText } from "ai";
+ import { BaseProvider } from "../core/baseProvider.js";
+ import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
+ // Constants
+ const FALLBACK_OPENAI_COMPATIBLE_MODEL = "gpt-3.5-turbo";
+ // Configuration helpers
+ const getOpenAICompatibleConfig = () => {
+ const baseURL = process.env.OPENAI_COMPATIBLE_BASE_URL;
+ const apiKey = process.env.OPENAI_COMPATIBLE_API_KEY;
+ if (!baseURL) {
+ throw new Error("OPENAI_COMPATIBLE_BASE_URL environment variable is required. " +
+ "Please set it to your OpenAI-compatible endpoint (e.g., https://api.openrouter.ai/api/v1)");
+ }
+ if (!apiKey) {
+ throw new Error("OPENAI_COMPATIBLE_API_KEY environment variable is required. " +
+ "Please set it to your API key for the OpenAI-compatible service.");
+ }
+ return {
+ baseURL,
+ apiKey,
+ };
+ };
+ /**
+ * Returns the default model name for OpenAI Compatible endpoints.
+ *
+ * Returns undefined if no model is specified via OPENAI_COMPATIBLE_MODEL environment variable,
+ * which triggers auto-discovery from the /v1/models endpoint.
+ */
+ const getDefaultOpenAICompatibleModel = () => {
+ return process.env.OPENAI_COMPATIBLE_MODEL || undefined;
+ };
+ /**
+ * OpenAI Compatible Provider - BaseProvider Implementation
+ * Provides access to any OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
+ */
+ export class OpenAICompatibleProvider extends BaseProvider {
+ model;
+ config;
+ discoveredModel;
+ customOpenAI;
+ constructor(modelName, sdk) {
+ super(modelName, "openai-compatible", sdk);
+ // Initialize OpenAI Compatible configuration
+ this.config = getOpenAICompatibleConfig();
+ // Create OpenAI SDK instance configured for custom endpoint
+ // This allows us to use any OpenAI-compatible API by simply changing the baseURL
+ this.customOpenAI = createOpenAI({
+ baseURL: this.config.baseURL,
+ apiKey: this.config.apiKey,
+ });
+ logger.debug("OpenAI Compatible Provider initialized", {
+ modelName: this.modelName,
+ provider: this.providerName,
+ baseURL: this.config.baseURL,
+ });
+ }
+ getProviderName() {
+ return "openai-compatible";
+ }
+ getDefaultModel() {
+ // Return empty string when no model is explicitly configured to enable auto-discovery
+ return getDefaultOpenAICompatibleModel() || "";
+ }
+ /**
+ * Returns the Vercel AI SDK model instance for OpenAI Compatible endpoints
+ * Handles auto-discovery if no model was specified
+ */
+ async getAISDKModel() {
+ // If model instance doesn't exist yet, create it
+ if (!this.model) {
+ let modelToUse;
+ // Check if a model was explicitly specified via constructor or env var
+ const explicitModel = this.modelName || getDefaultOpenAICompatibleModel();
+ // Treat empty string as no model specified (trigger auto-discovery)
+ if (explicitModel && explicitModel.trim() !== "") {
+ // Use the explicitly specified model
+ modelToUse = explicitModel;
+ logger.debug(`Using specified model: ${modelToUse}`);
+ }
+ else {
+ // No model specified, auto-discover from endpoint
+ try {
+ const availableModels = await this.getAvailableModels();
+ if (availableModels.length > 0) {
+ this.discoveredModel = availableModels[0];
+ modelToUse = this.discoveredModel;
+ logger.info(`🔍 Auto-discovered model: ${modelToUse} from ${availableModels.length} available models`);
+ }
+ else {
+ // Fall back to a common default if no models discovered
+ modelToUse = FALLBACK_OPENAI_COMPATIBLE_MODEL;
+ logger.warn(`No models discovered, using fallback: ${modelToUse}`);
+ }
+ }
+ catch (error) {
+ logger.warn("Model auto-discovery failed, using fallback:", error);
+ modelToUse = FALLBACK_OPENAI_COMPATIBLE_MODEL;
+ }
+ }
+ // Create the model instance
+ this.model = this.customOpenAI(modelToUse);
+ }
+ return this.model;
+ }
+ handleProviderError(error) {
+ if (error instanceof TimeoutError) {
+ return new Error(`OpenAI Compatible request timed out: ${error.message}`);
+ }
+ // Check for timeout by error name and message as fallback
+ const errorRecord = error;
+ if (errorRecord?.name === "TimeoutError" ||
+ (typeof errorRecord?.message === "string" &&
+ errorRecord.message.includes("Timeout"))) {
+ return new Error(`OpenAI Compatible request timed out: ${errorRecord?.message || "Unknown timeout"}`);
+ }
+ if (typeof errorRecord?.message === "string") {
+ if (errorRecord.message.includes("ECONNREFUSED") ||
+ errorRecord.message.includes("Failed to fetch")) {
+ return new Error(`OpenAI Compatible endpoint not available. Please check your OPENAI_COMPATIBLE_BASE_URL: ${this.config.baseURL}`);
+ }
+ if (errorRecord.message.includes("API_KEY_INVALID") ||
+ errorRecord.message.includes("Invalid API key") ||
+ errorRecord.message.includes("Unauthorized")) {
+ return new Error("Invalid OpenAI Compatible API key. Please check your OPENAI_COMPATIBLE_API_KEY environment variable.");
+ }
+ if (errorRecord.message.includes("rate limit")) {
+ return new Error("OpenAI Compatible rate limit exceeded. Please try again later.");
+ }
+ if (errorRecord.message.includes("model") &&
+ (errorRecord.message.includes("not found") ||
+ errorRecord.message.includes("does not exist"))) {
+ return new Error(`Model '${this.modelName}' not available on OpenAI Compatible endpoint. ` +
+ "Please check available models or use getAvailableModels() to see supported models.");
+ }
+ }
+ return new Error(`OpenAI Compatible error: ${errorRecord?.message || "Unknown error"}`);
+ }
+ /**
+ * OpenAI Compatible endpoints support tools for compatible models
+ */
+ supportsTools() {
+ return true;
+ }
+ /**
+ * Provider-specific streaming implementation
+ * Note: This is only used when tools are disabled
+ */
+ async executeStream(options, analysisSchema) {
+ this.validateStreamOptions(options);
+ const startTime = Date.now();
+ const timeout = this.getTimeout(options);
+ const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
+ try {
+ const model = await this.getAISDKModel();
+ const result = await streamText({
+ model,
+ prompt: options.input.text,
+ system: options.systemPrompt,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ tools: options.tools,
+ toolChoice: "auto",
+ abortSignal: timeoutController?.controller.signal,
+ });
+ timeoutController?.cleanup();
+ // Transform stream to match StreamResult interface
+ const transformedStream = async function* () {
+ for await (const chunk of result.textStream) {
+ yield { content: chunk };
+ }
+ };
+ // Create analytics promise that resolves after stream completion
+ const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+ requestId: `openai-compatible-stream-${Date.now()}`,
+ streamingMode: true,
+ });
+ return {
+ stream: transformedStream(),
+ provider: this.providerName,
+ model: this.modelName,
+ analytics: analyticsPromise,
+ metadata: {
+ startTime,
+ streamId: `openai-compatible-${Date.now()}`,
+ },
+ };
+ }
+ catch (error) {
+ timeoutController?.cleanup();
+ throw this.handleProviderError(error);
+ }
+ }
+ /**
+ * Get available models from OpenAI Compatible endpoint
+ *
+ * Fetches from the /v1/models endpoint to discover available models.
+ * This is useful for auto-discovery when no model is specified.
+ */
+ async getAvailableModels() {
+ try {
+ const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
+ logger.debug(`Fetching available models from: ${modelsUrl}`);
+ const response = await fetch(modelsUrl, {
+ headers: {
+ Authorization: `Bearer ${this.config.apiKey}`,
+ "Content-Type": "application/json",
+ },
+ });
+ if (!response.ok) {
+ logger.warn(`Models endpoint returned ${response.status}: ${response.statusText}`);
+ return this.getFallbackModels();
+ }
+ const data = await response.json();
+ if (!data.data || !Array.isArray(data.data)) {
+ logger.warn("Invalid models response format");
+ return this.getFallbackModels();
+ }
+ const models = data.data.map((model) => model.id).filter(Boolean);
+ logger.debug(`Discovered ${models.length} models:`, models);
+ return models.length > 0 ? models : this.getFallbackModels();
+ }
+ catch (error) {
+ logger.warn(`Failed to fetch models from OpenAI Compatible endpoint:`, error);
+ return this.getFallbackModels();
+ }
+ }
+ /**
+ * Get the first available model for auto-selection
+ */
+ async getFirstAvailableModel() {
+ const models = await this.getAvailableModels();
+ return models[0] || FALLBACK_OPENAI_COMPATIBLE_MODEL;
+ }
+ /**
+ * Fallback models when discovery fails
+ */
+ getFallbackModels() {
+ return [
+ "gpt-4o",
+ "gpt-4o-mini",
+ "gpt-4-turbo",
+ FALLBACK_OPENAI_COMPATIBLE_MODEL,
+ "claude-3-5-sonnet",
+ "claude-3-haiku",
+ "gemini-pro",
+ ];
+ }
+ // ===================
+ // PRIVATE VALIDATION METHODS
+ // ===================
+ validateStreamOptions(options) {
+ if (!options.input?.text || options.input.text.trim().length === 0) {
+ throw new Error("Input text is required and cannot be empty");
+ }
+ }
+ }
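Note that `getAvailableModels()` above requires only the standard OpenAI list shape: it reads `data` as an array, keeps each entry's `id`, and otherwise returns the static fallback list. One subtlety: because `/v1/models` is an absolute path, `new URL("/v1/models", this.config.baseURL)` discards any path prefix in the base URL (a base of `https://host/api/v1` resolves to `https://host/v1/models`), which is worth keeping in mind for prefixed gateways. A hedged illustration of a payload the parser would accept, with made-up model ids:

```typescript
// Example response body accepted by getAvailableModels(); extra fields are
// ignored - only data[i].id is read. The ids below are hypothetical.
const exampleModelsResponse = {
  object: "list",
  data: [
    { id: "claude-sonnet-4", object: "model" },
    { id: "gemini-2.5-pro", object: "model" },
    { id: "gpt-4o-mini", object: "model" },
  ],
};
// Equivalent extraction to the implementation above:
const ids = exampleModelsResponse.data.map((m) => m.id).filter(Boolean);
// -> ["claude-sonnet-4", "gemini-2.5-pro", "gpt-4o-mini"]
```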
@@ -91,6 +91,10 @@ export declare function createAzureAPIKeyConfig(): ProviderConfigOptions;
  * Creates Azure OpenAI Endpoint configuration
  */
  export declare function createAzureEndpointConfig(): ProviderConfigOptions;
+ /**
+ * Creates OpenAI Compatible provider configuration
+ */
+ export declare function createOpenAICompatibleConfig(): ProviderConfigOptions;
  /**
  * Creates Google Vertex Project ID configuration
  */
@@ -273,6 +273,26 @@ export function createAzureEndpointConfig() {
  ],
  };
  }
+ /**
+ * Creates OpenAI Compatible provider configuration
+ */
+ export function createOpenAICompatibleConfig() {
+ return {
+ providerName: "OpenAI Compatible",
+ envVarName: "OPENAI_COMPATIBLE_API_KEY",
+ setupUrl: "https://openrouter.ai/",
+ description: "OpenAI-compatible API credentials",
+ instructions: [
+ "1. Set OPENAI_COMPATIBLE_BASE_URL to your endpoint (e.g., https://api.openrouter.ai/api/v1)",
+ "2. Get API key from your OpenAI-compatible service:",
+ " • OpenRouter: https://openrouter.ai/keys",
+ " • vLLM: Use any value for local deployments",
+ " • LiteLLM: Check your LiteLLM server configuration",
+ "3. Set OPENAI_COMPATIBLE_API_KEY to your API key",
+ "4. Optionally set OPENAI_COMPATIBLE_MODEL (will auto-discover if not set)",
+ ],
+ };
+ }
  /**
  * Creates Google Vertex Project ID configuration
  */
@@ -5,6 +5,7 @@
  export { GoogleVertexProvider as GoogleVertexAI } from "./googleVertex.js";
  export { AmazonBedrockProvider as AmazonBedrock } from "./amazonBedrock.js";
  export { OpenAIProvider as OpenAI } from "./openAI.js";
+ export { OpenAICompatibleProvider as OpenAICompatible } from "./openaiCompatible.js";
  export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
  export { AzureOpenAIProvider } from "./azureOpenai.js";
  export { GoogleAIStudioProvider as GoogleAIStudio } from "./googleAiStudio.js";
@@ -19,6 +20,7 @@ export declare const PROVIDERS: {
  readonly vertex: "GoogleVertexAI";
  readonly bedrock: "AmazonBedrock";
  readonly openai: "OpenAI";
+ readonly "openai-compatible": "OpenAICompatible";
  readonly anthropic: "AnthropicProvider";
  readonly azure: "AzureOpenAIProvider";
  readonly "google-ai": "GoogleAIStudio";
@@ -5,6 +5,7 @@
  export { GoogleVertexProvider as GoogleVertexAI } from "./googleVertex.js";
  export { AmazonBedrockProvider as AmazonBedrock } from "./amazonBedrock.js";
  export { OpenAIProvider as OpenAI } from "./openAI.js";
+ export { OpenAICompatibleProvider as OpenAICompatible } from "./openaiCompatible.js";
  export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
  export { AzureOpenAIProvider } from "./azureOpenai.js";
  export { GoogleAIStudioProvider as GoogleAIStudio } from "./googleAiStudio.js";
@@ -18,6 +19,7 @@ export const PROVIDERS = {
  vertex: "GoogleVertexAI",
  bedrock: "AmazonBedrock",
  openai: "OpenAI",
+ "openai-compatible": "OpenAICompatible",
  anthropic: "AnthropicProvider",
  azure: "AzureOpenAIProvider",
  "google-ai": "GoogleAIStudio",
@@ -0,0 +1,49 @@
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type Schema, type LanguageModelV1 } from "ai";
+ import type { AIProviderName } from "../core/types.js";
+ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
+ import { BaseProvider } from "../core/baseProvider.js";
+ /**
+ * OpenAI Compatible Provider - BaseProvider Implementation
+ * Provides access to any OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
+ */
+ export declare class OpenAICompatibleProvider extends BaseProvider {
+ private model?;
+ private config;
+ private discoveredModel?;
+ private customOpenAI;
+ constructor(modelName?: string, sdk?: unknown);
+ protected getProviderName(): AIProviderName;
+ protected getDefaultModel(): string;
+ /**
+ * Returns the Vercel AI SDK model instance for OpenAI Compatible endpoints
+ * Handles auto-discovery if no model was specified
+ */
+ protected getAISDKModel(): Promise<LanguageModelV1>;
+ protected handleProviderError(error: unknown): Error;
+ /**
+ * OpenAI Compatible endpoints support tools for compatible models
+ */
+ supportsTools(): boolean;
+ /**
+ * Provider-specific streaming implementation
+ * Note: This is only used when tools are disabled
+ */
+ protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+ /**
+ * Get available models from OpenAI Compatible endpoint
+ *
+ * Fetches from the /v1/models endpoint to discover available models.
+ * This is useful for auto-discovery when no model is specified.
+ */
+ getAvailableModels(): Promise<string[]>;
+ /**
+ * Get the first available model for auto-selection
+ */
+ getFirstAvailableModel(): Promise<string>;
+ /**
+ * Fallback models when discovery fails
+ */
+ private getFallbackModels;
+ private validateStreamOptions;
+ }
@@ -0,0 +1,261 @@
+ import { createOpenAI } from "@ai-sdk/openai";
+ import { streamText } from "ai";
+ import { BaseProvider } from "../core/baseProvider.js";
+ import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { validateApiKey, getProviderModel } from "../utils/providerConfig.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
+ // Constants
+ const FALLBACK_OPENAI_COMPATIBLE_MODEL = "gpt-3.5-turbo";
+ // Configuration helpers
+ const getOpenAICompatibleConfig = () => {
+ const baseURL = process.env.OPENAI_COMPATIBLE_BASE_URL;
+ const apiKey = process.env.OPENAI_COMPATIBLE_API_KEY;
+ if (!baseURL) {
+ throw new Error("OPENAI_COMPATIBLE_BASE_URL environment variable is required. " +
+ "Please set it to your OpenAI-compatible endpoint (e.g., https://api.openrouter.ai/api/v1)");
+ }
+ if (!apiKey) {
+ throw new Error("OPENAI_COMPATIBLE_API_KEY environment variable is required. " +
+ "Please set it to your API key for the OpenAI-compatible service.");
+ }
+ return {
+ baseURL,
+ apiKey,
+ };
+ };
+ /**
+ * Returns the default model name for OpenAI Compatible endpoints.
+ *
+ * Returns undefined if no model is specified via OPENAI_COMPATIBLE_MODEL environment variable,
+ * which triggers auto-discovery from the /v1/models endpoint.
+ */
+ const getDefaultOpenAICompatibleModel = () => {
+ return process.env.OPENAI_COMPATIBLE_MODEL || undefined;
+ };
+ /**
+ * OpenAI Compatible Provider - BaseProvider Implementation
+ * Provides access to any OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
+ */
+ export class OpenAICompatibleProvider extends BaseProvider {
+ model;
+ config;
+ discoveredModel;
+ customOpenAI;
+ constructor(modelName, sdk) {
+ super(modelName, "openai-compatible", sdk);
+ // Initialize OpenAI Compatible configuration
+ this.config = getOpenAICompatibleConfig();
+ // Create OpenAI SDK instance configured for custom endpoint
+ // This allows us to use any OpenAI-compatible API by simply changing the baseURL
+ this.customOpenAI = createOpenAI({
+ baseURL: this.config.baseURL,
+ apiKey: this.config.apiKey,
+ });
+ logger.debug("OpenAI Compatible Provider initialized", {
+ modelName: this.modelName,
+ provider: this.providerName,
+ baseURL: this.config.baseURL,
+ });
+ }
+ getProviderName() {
+ return "openai-compatible";
+ }
+ getDefaultModel() {
+ // Return empty string when no model is explicitly configured to enable auto-discovery
+ return getDefaultOpenAICompatibleModel() || "";
+ }
+ /**
+ * Returns the Vercel AI SDK model instance for OpenAI Compatible endpoints
+ * Handles auto-discovery if no model was specified
+ */
+ async getAISDKModel() {
+ // If model instance doesn't exist yet, create it
+ if (!this.model) {
+ let modelToUse;
+ // Check if a model was explicitly specified via constructor or env var
+ const explicitModel = this.modelName || getDefaultOpenAICompatibleModel();
+ // Treat empty string as no model specified (trigger auto-discovery)
+ if (explicitModel && explicitModel.trim() !== "") {
+ // Use the explicitly specified model
+ modelToUse = explicitModel;
+ logger.debug(`Using specified model: ${modelToUse}`);
+ }
+ else {
+ // No model specified, auto-discover from endpoint
+ try {
+ const availableModels = await this.getAvailableModels();
+ if (availableModels.length > 0) {
+ this.discoveredModel = availableModels[0];
+ modelToUse = this.discoveredModel;
+ logger.info(`🔍 Auto-discovered model: ${modelToUse} from ${availableModels.length} available models`);
+ }
+ else {
+ // Fall back to a common default if no models discovered
+ modelToUse = FALLBACK_OPENAI_COMPATIBLE_MODEL;
+ logger.warn(`No models discovered, using fallback: ${modelToUse}`);
+ }
+ }
+ catch (error) {
+ logger.warn("Model auto-discovery failed, using fallback:", error);
+ modelToUse = FALLBACK_OPENAI_COMPATIBLE_MODEL;
+ }
+ }
+ // Create the model instance
+ this.model = this.customOpenAI(modelToUse);
+ }
+ return this.model;
+ }
+ handleProviderError(error) {
+ if (error instanceof TimeoutError) {
+ return new Error(`OpenAI Compatible request timed out: ${error.message}`);
+ }
+ // Check for timeout by error name and message as fallback
+ const errorRecord = error;
+ if (errorRecord?.name === "TimeoutError" ||
+ (typeof errorRecord?.message === "string" &&
+ errorRecord.message.includes("Timeout"))) {
+ return new Error(`OpenAI Compatible request timed out: ${errorRecord?.message || "Unknown timeout"}`);
+ }
+ if (typeof errorRecord?.message === "string") {
+ if (errorRecord.message.includes("ECONNREFUSED") ||
+ errorRecord.message.includes("Failed to fetch")) {
+ return new Error(`OpenAI Compatible endpoint not available. Please check your OPENAI_COMPATIBLE_BASE_URL: ${this.config.baseURL}`);
+ }
+ if (errorRecord.message.includes("API_KEY_INVALID") ||
+ errorRecord.message.includes("Invalid API key") ||
+ errorRecord.message.includes("Unauthorized")) {
+ return new Error("Invalid OpenAI Compatible API key. Please check your OPENAI_COMPATIBLE_API_KEY environment variable.");
+ }
+ if (errorRecord.message.includes("rate limit")) {
+ return new Error("OpenAI Compatible rate limit exceeded. Please try again later.");
+ }
+ if (errorRecord.message.includes("model") &&
+ (errorRecord.message.includes("not found") ||
+ errorRecord.message.includes("does not exist"))) {
+ return new Error(`Model '${this.modelName}' not available on OpenAI Compatible endpoint. ` +
+ "Please check available models or use getAvailableModels() to see supported models.");
+ }
+ }
+ return new Error(`OpenAI Compatible error: ${errorRecord?.message || "Unknown error"}`);
+ }
+ /**
+ * OpenAI Compatible endpoints support tools for compatible models
+ */
+ supportsTools() {
+ return true;
+ }
+ /**
+ * Provider-specific streaming implementation
+ * Note: This is only used when tools are disabled
+ */
+ async executeStream(options, analysisSchema) {
+ this.validateStreamOptions(options);
+ const startTime = Date.now();
+ const timeout = this.getTimeout(options);
+ const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
+ try {
+ const model = await this.getAISDKModel();
+ const result = await streamText({
+ model,
+ prompt: options.input.text,
+ system: options.systemPrompt,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ tools: options.tools,
+ toolChoice: "auto",
+ abortSignal: timeoutController?.controller.signal,
+ });
+ timeoutController?.cleanup();
+ // Transform stream to match StreamResult interface
+ const transformedStream = async function* () {
+ for await (const chunk of result.textStream) {
+ yield { content: chunk };
+ }
+ };
+ // Create analytics promise that resolves after stream completion
+ const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+ requestId: `openai-compatible-stream-${Date.now()}`,
+ streamingMode: true,
+ });
+ return {
+ stream: transformedStream(),
+ provider: this.providerName,
+ model: this.modelName,
+ analytics: analyticsPromise,
+ metadata: {
+ startTime,
+ streamId: `openai-compatible-${Date.now()}`,
+ },
+ };
+ }
+ catch (error) {
+ timeoutController?.cleanup();
+ throw this.handleProviderError(error);
+ }
+ }
+ /**
+ * Get available models from OpenAI Compatible endpoint
+ *
+ * Fetches from the /v1/models endpoint to discover available models.
+ * This is useful for auto-discovery when no model is specified.
+ */
+ async getAvailableModels() {
+ try {
+ const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
+ logger.debug(`Fetching available models from: ${modelsUrl}`);
+ const response = await fetch(modelsUrl, {
+ headers: {
+ Authorization: `Bearer ${this.config.apiKey}`,
+ "Content-Type": "application/json",
+ },
+ });
+ if (!response.ok) {
+ logger.warn(`Models endpoint returned ${response.status}: ${response.statusText}`);
+ return this.getFallbackModels();
+ }
+ const data = await response.json();
+ if (!data.data || !Array.isArray(data.data)) {
+ logger.warn("Invalid models response format");
+ return this.getFallbackModels();
+ }
+ const models = data.data.map((model) => model.id).filter(Boolean);
+ logger.debug(`Discovered ${models.length} models:`, models);
+ return models.length > 0 ? models : this.getFallbackModels();
+ }
+ catch (error) {
+ logger.warn(`Failed to fetch models from OpenAI Compatible endpoint:`, error);
+ return this.getFallbackModels();
+ }
+ }
+ /**
+ * Get the first available model for auto-selection
+ */
+ async getFirstAvailableModel() {
+ const models = await this.getAvailableModels();
+ return models[0] || FALLBACK_OPENAI_COMPATIBLE_MODEL;
+ }
+ /**
+ * Fallback models when discovery fails
+ */
+ getFallbackModels() {
+ return [
+ "gpt-4o",
+ "gpt-4o-mini",
+ "gpt-4-turbo",
+ FALLBACK_OPENAI_COMPATIBLE_MODEL,
+ "claude-3-5-sonnet",
+ "claude-3-haiku",
+ "gemini-pro",
+ ];
+ }
+ // ===================
+ // PRIVATE VALIDATION METHODS
+ // ===================
+ validateStreamOptions(options) {
+ if (!options.input?.text || options.input.text.trim().length === 0) {
+ throw new Error("Input text is required and cannot be empty");
+ }
+ }
+ }
@@ -91,6 +91,10 @@ export declare function createAzureAPIKeyConfig(): ProviderConfigOptions;
  * Creates Azure OpenAI Endpoint configuration
  */
  export declare function createAzureEndpointConfig(): ProviderConfigOptions;
+ /**
+ * Creates OpenAI Compatible provider configuration
+ */
+ export declare function createOpenAICompatibleConfig(): ProviderConfigOptions;
  /**
  * Creates Google Vertex Project ID configuration
  */
@@ -273,6 +273,26 @@ export function createAzureEndpointConfig() {
  ],
  };
  }
+ /**
+ * Creates OpenAI Compatible provider configuration
+ */
+ export function createOpenAICompatibleConfig() {
+ return {
+ providerName: "OpenAI Compatible",
+ envVarName: "OPENAI_COMPATIBLE_API_KEY",
+ setupUrl: "https://openrouter.ai/",
+ description: "OpenAI-compatible API credentials",
+ instructions: [
+ "1. Set OPENAI_COMPATIBLE_BASE_URL to your endpoint (e.g., https://api.openrouter.ai/api/v1)",
+ "2. Get API key from your OpenAI-compatible service:",
+ " • OpenRouter: https://openrouter.ai/keys",
+ " • vLLM: Use any value for local deployments",
+ " • LiteLLM: Check your LiteLLM server configuration",
+ "3. Set OPENAI_COMPATIBLE_API_KEY to your API key",
+ "4. Optionally set OPENAI_COMPATIBLE_MODEL (will auto-discover if not set)",
+ ],
+ };
+ }
  /**
  * Creates Google Vertex Project ID configuration
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "7.5.0",
+ "version": "7.6.1",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",