@juspay/neurolink 7.4.0 → 7.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -143,6 +143,10 @@ export function hasProviderEnvVars(provider) {
      case "mistral-ai":
      case "mistralai":
        return !!process.env.MISTRAL_API_KEY;
+     case "litellm":
+       // LiteLLM requires a proxy server, which can be checked for availability
+       // Default base URL is assumed, or can be configured via environment
+       return true; // LiteLLM proxy availability will be checked during usage
      default:
        return false;
  }
package/dist/neurolink.js CHANGED
@@ -701,6 +701,7 @@ export class NeuroLink {
      "huggingface",
      "ollama",
      "mistral",
+     "litellm",
    ];
    // 🚀 PERFORMANCE FIX: Test providers with controlled concurrency
    // This reduces total time from 16s (sequential) to ~3s (parallel) while preventing resource exhaustion
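The comment in this hunk explains why providers are probed with controlled concurrency rather than sequentially. As a rough illustration of the idea (not the package's actual implementation — the worker count and the checkProvider callback are assumptions), a bounded worker pool looks roughly like this:

// Illustrative sketch only: probe providers with a bounded worker pool so total
// time approaches the slowest single check instead of the sum of all checks,
// without opening a connection to every provider at once. Names are assumptions.
async function testProvidersWithLimit(providers, checkProvider, limit = 3) {
  const results = [];
  let next = 0;
  const worker = async () => {
    while (next < providers.length) {
      const provider = providers[next++];
      try {
        results.push({ provider, available: await checkProvider(provider) });
      } catch {
        results.push({ provider, available: false });
      }
    }
  };
  await Promise.all(Array.from({ length: limit }, worker));
  return results;
}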
@@ -5,6 +5,7 @@
  export { GoogleVertexProvider as GoogleVertexAI } from "./googleVertex.js";
  export { AmazonBedrockProvider as AmazonBedrock } from "./amazonBedrock.js";
  export { OpenAIProvider as OpenAI } from "./openAI.js";
+ export { OpenAICompatibleProvider as OpenAICompatible } from "./openaiCompatible.js";
  export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
  export { AzureOpenAIProvider } from "./azureOpenai.js";
  export { GoogleAIStudioProvider as GoogleAIStudio } from "./googleAiStudio.js";
@@ -19,6 +20,7 @@ export declare const PROVIDERS: {
      readonly vertex: "GoogleVertexAI";
      readonly bedrock: "AmazonBedrock";
      readonly openai: "OpenAI";
+     readonly "openai-compatible": "OpenAICompatible";
      readonly anthropic: "AnthropicProvider";
      readonly azure: "AzureOpenAIProvider";
      readonly "google-ai": "GoogleAIStudio";
@@ -5,6 +5,7 @@
  export { GoogleVertexProvider as GoogleVertexAI } from "./googleVertex.js";
  export { AmazonBedrockProvider as AmazonBedrock } from "./amazonBedrock.js";
  export { OpenAIProvider as OpenAI } from "./openAI.js";
+ export { OpenAICompatibleProvider as OpenAICompatible } from "./openaiCompatible.js";
  export { AnthropicProvider as AnthropicProvider } from "./anthropic.js";
  export { AzureOpenAIProvider } from "./azureOpenai.js";
  export { GoogleAIStudioProvider as GoogleAIStudio } from "./googleAiStudio.js";
@@ -18,6 +19,7 @@ export const PROVIDERS = {
      vertex: "GoogleVertexAI",
      bedrock: "AmazonBedrock",
      openai: "OpenAI",
+     "openai-compatible": "OpenAICompatible",
      anthropic: "AnthropicProvider",
      azure: "AzureOpenAIProvider",
      "google-ai": "GoogleAIStudio",
@@ -0,0 +1,43 @@
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type Schema, type LanguageModelV1 } from "ai";
+ import type { AIProviderName } from "../core/types.js";
+ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
+ import { BaseProvider } from "../core/baseProvider.js";
+ /**
+  * LiteLLM Provider - BaseProvider Implementation
+  * Provides access to 100+ models via LiteLLM proxy server
+  */
+ export declare class LiteLLMProvider extends BaseProvider {
+     private model;
+     constructor(modelName?: string, sdk?: unknown);
+     protected getProviderName(): AIProviderName;
+     protected getDefaultModel(): string;
+     /**
+      * Returns the Vercel AI SDK model instance for LiteLLM
+      */
+     protected getAISDKModel(): LanguageModelV1;
+     protected handleProviderError(error: unknown): Error;
+     /**
+      * LiteLLM supports tools for compatible models
+      */
+     supportsTools(): boolean;
+     /**
+      * Provider-specific streaming implementation
+      * Note: This is only used when tools are disabled
+      */
+     protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+     /**
+      * Get available models from LiteLLM proxy server
+      *
+      * TODO: Implement dynamic fetching from LiteLLM's /v1/models endpoint.
+      * Currently returns a hardcoded list of commonly available models.
+      *
+      * Implementation would involve:
+      * 1. Fetch from `${baseURL}/v1/models`
+      * 2. Parse response to extract model IDs
+      * 3. Handle network errors gracefully
+      * 4. Cache results to avoid repeated API calls
+      */
+     getAvailableModels(): Promise<string[]>;
+     private validateStreamOptions;
+ }
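The TODO in getAvailableModels spells out how dynamic discovery could work. A hedged sketch of those four steps follows (the cache variable and helper name are assumptions; the shipped code still returns a hardcoded list, and the openaiCompatible provider further down implements a very similar fetch):

// Sketch of the TODO above, not shipped code: fetch model IDs from the LiteLLM
// proxy's /v1/models endpoint, degrade gracefully on errors, and cache results.
let cachedLiteLLMModels = null; // assumed module-level cache
async function fetchLiteLLMModels() {
  if (cachedLiteLLMModels) {
    return cachedLiteLLMModels; // step 4: avoid repeated API calls
  }
  const baseURL = process.env.LITELLM_BASE_URL || "http://localhost:4000";
  const apiKey = process.env.LITELLM_API_KEY || "sk-anything";
  try {
    // step 1: fetch from `${baseURL}/v1/models`
    const response = await fetch(new URL("/v1/models", baseURL), {
      headers: { Authorization: `Bearer ${apiKey}` },
    });
    if (!response.ok) {
      return []; // caller keeps its hardcoded fallback list
    }
    // step 2: parse the response to extract model IDs
    const data = await response.json();
    cachedLiteLLMModels = (data.data || []).map((m) => m.id).filter(Boolean);
    return cachedLiteLLMModels;
  } catch {
    return []; // step 3: handle network errors gracefully
  }
}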
@@ -0,0 +1,188 @@
+ import { openai, createOpenAI } from "@ai-sdk/openai";
+ import { streamText, Output } from "ai";
+ import { BaseProvider } from "../core/baseProvider.js";
+ import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { validateApiKey, getProviderModel } from "../utils/providerConfig.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
+ // Configuration helpers
+ const getLiteLLMConfig = () => {
+     return {
+         baseURL: process.env.LITELLM_BASE_URL || "http://localhost:4000",
+         apiKey: process.env.LITELLM_API_KEY || "sk-anything",
+     };
+ };
+ /**
+  * Returns the default model name for LiteLLM.
+  *
+  * LiteLLM uses a 'provider/model' format for model names.
+  * For example:
+  * - 'openai/gpt-4o-mini'
+  * - 'openai/gpt-3.5-turbo'
+  * - 'anthropic/claude-3-sonnet-20240229'
+  * - 'google/gemini-pro'
+  *
+  * You can override the default by setting the LITELLM_MODEL environment variable.
+  */
+ const getDefaultLiteLLMModel = () => {
+     return getProviderModel("LITELLM_MODEL", "openai/gpt-4o-mini");
+ };
+ /**
+  * LiteLLM Provider - BaseProvider Implementation
+  * Provides access to 100+ models via LiteLLM proxy server
+  */
+ export class LiteLLMProvider extends BaseProvider {
+     model;
+     constructor(modelName, sdk) {
+         super(modelName, "litellm", sdk);
+         // Initialize LiteLLM using OpenAI SDK with explicit configuration
+         const config = getLiteLLMConfig();
+         // Create OpenAI SDK instance configured for LiteLLM proxy
+         // LiteLLM acts as a proxy server that implements the OpenAI-compatible API.
+         // To communicate with LiteLLM instead of the default OpenAI endpoint, we use createOpenAI
+         // with a custom baseURL and apiKey. This ensures all requests are routed through the LiteLLM
+         // proxy, allowing access to multiple models and custom authentication.
+         const customOpenAI = createOpenAI({
+             baseURL: config.baseURL,
+             apiKey: config.apiKey,
+         });
+         this.model = customOpenAI(this.modelName || getDefaultLiteLLMModel());
+         logger.debug("LiteLLM Provider initialized", {
+             modelName: this.modelName,
+             provider: this.providerName,
+             baseURL: config.baseURL,
+         });
+     }
+     getProviderName() {
+         return "litellm";
+     }
+     getDefaultModel() {
+         return getDefaultLiteLLMModel();
+     }
+     /**
+      * Returns the Vercel AI SDK model instance for LiteLLM
+      */
+     getAISDKModel() {
+         return this.model;
+     }
+     handleProviderError(error) {
+         if (error instanceof TimeoutError) {
+             return new Error(`LiteLLM request timed out: ${error.message}`);
+         }
+         // Check for timeout by error name and message as fallback
+         const errorRecord = error;
+         if (errorRecord?.name === "TimeoutError" ||
+             (typeof errorRecord?.message === "string" &&
+                 errorRecord.message.includes("Timeout"))) {
+             return new Error(`LiteLLM request timed out: ${errorRecord?.message || "Unknown timeout"}`);
+         }
+         if (typeof errorRecord?.message === "string") {
+             if (errorRecord.message.includes("ECONNREFUSED") ||
+                 errorRecord.message.includes("Failed to fetch")) {
+                 return new Error("LiteLLM proxy server not available. Please start the LiteLLM proxy server at " +
+                     `${process.env.LITELLM_BASE_URL || "http://localhost:4000"}`);
+             }
+             if (errorRecord.message.includes("API_KEY_INVALID") ||
+                 errorRecord.message.includes("Invalid API key")) {
+                 return new Error("Invalid LiteLLM configuration. Please check your LITELLM_API_KEY environment variable.");
+             }
+             if (errorRecord.message.includes("rate limit")) {
+                 return new Error("LiteLLM rate limit exceeded. Please try again later.");
+             }
+             if (errorRecord.message.includes("model") &&
+                 errorRecord.message.includes("not found")) {
+                 return new Error(`Model '${this.modelName}' not available in LiteLLM proxy. ` +
+                     "Please check your LiteLLM configuration and ensure the model is configured.");
+             }
+         }
+         return new Error(`LiteLLM error: ${errorRecord?.message || "Unknown error"}`);
+     }
+     /**
+      * LiteLLM supports tools for compatible models
+      */
+     supportsTools() {
+         return true;
+     }
+     /**
+      * Provider-specific streaming implementation
+      * Note: This is only used when tools are disabled
+      */
+     async executeStream(options, analysisSchema) {
+         this.validateStreamOptions(options);
+         const startTime = Date.now();
+         const timeout = this.getTimeout(options);
+         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
+         try {
+             const result = await streamText({
+                 model: this.model,
+                 prompt: options.input.text,
+                 system: options.systemPrompt,
+                 temperature: options.temperature,
+                 maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                 tools: options.tools,
+                 toolChoice: "auto",
+                 abortSignal: timeoutController?.controller.signal,
+             });
+             timeoutController?.cleanup();
+             // Transform stream to match StreamResult interface
+             const transformedStream = async function* () {
+                 for await (const chunk of result.textStream) {
+                     yield { content: chunk };
+                 }
+             };
+             // Create analytics promise that resolves after stream completion
+             const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+                 requestId: `litellm-stream-${Date.now()}`,
+                 streamingMode: true,
+             });
+             return {
+                 stream: transformedStream(),
+                 provider: this.providerName,
+                 model: this.modelName,
+                 analytics: analyticsPromise,
+                 metadata: {
+                     startTime,
+                     streamId: `litellm-${Date.now()}`,
+                 },
+             };
+         }
+         catch (error) {
+             timeoutController?.cleanup();
+             throw this.handleProviderError(error);
+         }
+     }
+     /**
+      * Get available models from LiteLLM proxy server
+      *
+      * TODO: Implement dynamic fetching from LiteLLM's /v1/models endpoint.
+      * Currently returns a hardcoded list of commonly available models.
+      *
+      * Implementation would involve:
+      * 1. Fetch from `${baseURL}/v1/models`
+      * 2. Parse response to extract model IDs
+      * 3. Handle network errors gracefully
+      * 4. Cache results to avoid repeated API calls
+      */
+     async getAvailableModels() {
+         // Hardcoded list of commonly available models
+         // TODO: Replace with dynamic fetch from LiteLLM proxy /v1/models endpoint
+         return [
+             "openai/gpt-4o",
+             "openai/gpt-4o-mini",
+             "anthropic/claude-3-5-sonnet",
+             "anthropic/claude-3-haiku",
+             "google/gemini-2.0-flash",
+             "mistral/mistral-large",
+             "mistral/mistral-medium",
+         ];
+     }
+     // ===================
+     // PRIVATE VALIDATION METHODS
+     // ===================
+     validateStreamOptions(options) {
+         if (!options.input?.text || options.input.text.trim().length === 0) {
+             throw new Error("Input text is required and cannot be empty");
+         }
+     }
+ }
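Taken together, the new provider reads its configuration from LITELLM_BASE_URL, LITELLM_API_KEY, and LITELLM_MODEL and addresses models in LiteLLM's 'provider/model' format. A minimal usage sketch follows; the import path is an assumption, since this diff only shows the provider file itself:

// Usage sketch under assumptions: a LiteLLM proxy is running at localhost:4000
// and LiteLLMProvider is importable from the package root (path not confirmed
// by this diff — adjust to the actual build layout).
import { LiteLLMProvider } from "@juspay/neurolink";

process.env.LITELLM_BASE_URL = "http://localhost:4000";
process.env.LITELLM_API_KEY = "sk-anything"; // the proxy decides whether keys matter
process.env.LITELLM_MODEL = "openai/gpt-4o-mini"; // LiteLLM's 'provider/model' format

const provider = new LiteLLMProvider(); // defaults to LITELLM_MODEL when no name is passed
console.log(provider.supportsTools()); // true
console.log(await provider.getAvailableModels()); // currently a hardcoded list (see TODO above)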
@@ -0,0 +1,49 @@
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type Schema, type LanguageModelV1 } from "ai";
+ import type { AIProviderName } from "../core/types.js";
+ import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
+ import { BaseProvider } from "../core/baseProvider.js";
+ /**
+  * OpenAI Compatible Provider - BaseProvider Implementation
+  * Provides access to any OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
+  */
+ export declare class OpenAICompatibleProvider extends BaseProvider {
+     private model?;
+     private config;
+     private discoveredModel?;
+     private customOpenAI;
+     constructor(modelName?: string, sdk?: unknown);
+     protected getProviderName(): AIProviderName;
+     protected getDefaultModel(): string;
+     /**
+      * Returns the Vercel AI SDK model instance for OpenAI Compatible endpoints
+      * Handles auto-discovery if no model was specified
+      */
+     protected getAISDKModel(): Promise<LanguageModelV1>;
+     protected handleProviderError(error: unknown): Error;
+     /**
+      * OpenAI Compatible endpoints support tools for compatible models
+      */
+     supportsTools(): boolean;
+     /**
+      * Provider-specific streaming implementation
+      * Note: This is only used when tools are disabled
+      */
+     protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+     /**
+      * Get available models from OpenAI Compatible endpoint
+      *
+      * Fetches from the /v1/models endpoint to discover available models.
+      * This is useful for auto-discovery when no model is specified.
+      */
+     getAvailableModels(): Promise<string[]>;
+     /**
+      * Get the first available model for auto-selection
+      */
+     getFirstAvailableModel(): Promise<string>;
+     /**
+      * Fallback models when discovery fails
+      */
+     private getFallbackModels;
+     private validateStreamOptions;
+ }
@@ -0,0 +1,261 @@
+ import { createOpenAI } from "@ai-sdk/openai";
+ import { streamText } from "ai";
+ import { BaseProvider } from "../core/baseProvider.js";
+ import { logger } from "../utils/logger.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { validateApiKey, getProviderModel } from "../utils/providerConfig.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
+ // Constants
+ const FALLBACK_OPENAI_COMPATIBLE_MODEL = "gpt-3.5-turbo";
+ // Configuration helpers
+ const getOpenAICompatibleConfig = () => {
+     const baseURL = process.env.OPENAI_COMPATIBLE_BASE_URL;
+     const apiKey = process.env.OPENAI_COMPATIBLE_API_KEY;
+     if (!baseURL) {
+         throw new Error("OPENAI_COMPATIBLE_BASE_URL environment variable is required. " +
+             "Please set it to your OpenAI-compatible endpoint (e.g., https://api.openrouter.ai/api/v1)");
+     }
+     if (!apiKey) {
+         throw new Error("OPENAI_COMPATIBLE_API_KEY environment variable is required. " +
+             "Please set it to your API key for the OpenAI-compatible service.");
+     }
+     return {
+         baseURL,
+         apiKey,
+     };
+ };
+ /**
+  * Returns the default model name for OpenAI Compatible endpoints.
+  *
+  * Returns undefined if no model is specified via OPENAI_COMPATIBLE_MODEL environment variable,
+  * which triggers auto-discovery from the /v1/models endpoint.
+  */
+ const getDefaultOpenAICompatibleModel = () => {
+     return process.env.OPENAI_COMPATIBLE_MODEL || undefined;
+ };
+ /**
+  * OpenAI Compatible Provider - BaseProvider Implementation
+  * Provides access to any OpenAI-compatible endpoint (OpenRouter, vLLM, LiteLLM, etc.)
+  */
+ export class OpenAICompatibleProvider extends BaseProvider {
+     model;
+     config;
+     discoveredModel;
+     customOpenAI;
+     constructor(modelName, sdk) {
+         super(modelName, "openai-compatible", sdk);
+         // Initialize OpenAI Compatible configuration
+         this.config = getOpenAICompatibleConfig();
+         // Create OpenAI SDK instance configured for custom endpoint
+         // This allows us to use any OpenAI-compatible API by simply changing the baseURL
+         this.customOpenAI = createOpenAI({
+             baseURL: this.config.baseURL,
+             apiKey: this.config.apiKey,
+         });
+         logger.debug("OpenAI Compatible Provider initialized", {
+             modelName: this.modelName,
+             provider: this.providerName,
+             baseURL: this.config.baseURL,
+         });
+     }
+     getProviderName() {
+         return "openai-compatible";
+     }
+     getDefaultModel() {
+         // Return empty string when no model is explicitly configured to enable auto-discovery
+         return getDefaultOpenAICompatibleModel() || "";
+     }
+     /**
+      * Returns the Vercel AI SDK model instance for OpenAI Compatible endpoints
+      * Handles auto-discovery if no model was specified
+      */
+     async getAISDKModel() {
+         // If model instance doesn't exist yet, create it
+         if (!this.model) {
+             let modelToUse;
+             // Check if a model was explicitly specified via constructor or env var
+             const explicitModel = this.modelName || getDefaultOpenAICompatibleModel();
+             // Treat empty string as no model specified (trigger auto-discovery)
+             if (explicitModel && explicitModel.trim() !== "") {
+                 // Use the explicitly specified model
+                 modelToUse = explicitModel;
+                 logger.debug(`Using specified model: ${modelToUse}`);
+             }
+             else {
+                 // No model specified, auto-discover from endpoint
+                 try {
+                     const availableModels = await this.getAvailableModels();
+                     if (availableModels.length > 0) {
+                         this.discoveredModel = availableModels[0];
+                         modelToUse = this.discoveredModel;
+                         logger.info(`🔍 Auto-discovered model: ${modelToUse} from ${availableModels.length} available models`);
+                     }
+                     else {
+                         // Fall back to a common default if no models discovered
+                         modelToUse = FALLBACK_OPENAI_COMPATIBLE_MODEL;
+                         logger.warn(`No models discovered, using fallback: ${modelToUse}`);
+                     }
+                 }
+                 catch (error) {
+                     logger.warn("Model auto-discovery failed, using fallback:", error);
+                     modelToUse = FALLBACK_OPENAI_COMPATIBLE_MODEL;
+                 }
+             }
+             // Create the model instance
+             this.model = this.customOpenAI(modelToUse);
+         }
+         return this.model;
+     }
+     handleProviderError(error) {
+         if (error instanceof TimeoutError) {
+             return new Error(`OpenAI Compatible request timed out: ${error.message}`);
+         }
+         // Check for timeout by error name and message as fallback
+         const errorRecord = error;
+         if (errorRecord?.name === "TimeoutError" ||
+             (typeof errorRecord?.message === "string" &&
+                 errorRecord.message.includes("Timeout"))) {
+             return new Error(`OpenAI Compatible request timed out: ${errorRecord?.message || "Unknown timeout"}`);
+         }
+         if (typeof errorRecord?.message === "string") {
+             if (errorRecord.message.includes("ECONNREFUSED") ||
+                 errorRecord.message.includes("Failed to fetch")) {
+                 return new Error(`OpenAI Compatible endpoint not available. Please check your OPENAI_COMPATIBLE_BASE_URL: ${this.config.baseURL}`);
+             }
+             if (errorRecord.message.includes("API_KEY_INVALID") ||
+                 errorRecord.message.includes("Invalid API key") ||
+                 errorRecord.message.includes("Unauthorized")) {
+                 return new Error("Invalid OpenAI Compatible API key. Please check your OPENAI_COMPATIBLE_API_KEY environment variable.");
+             }
+             if (errorRecord.message.includes("rate limit")) {
+                 return new Error("OpenAI Compatible rate limit exceeded. Please try again later.");
+             }
+             if (errorRecord.message.includes("model") &&
+                 (errorRecord.message.includes("not found") ||
+                     errorRecord.message.includes("does not exist"))) {
+                 return new Error(`Model '${this.modelName}' not available on OpenAI Compatible endpoint. ` +
+                     "Please check available models or use getAvailableModels() to see supported models.");
+             }
+         }
+         return new Error(`OpenAI Compatible error: ${errorRecord?.message || "Unknown error"}`);
+     }
+     /**
+      * OpenAI Compatible endpoints support tools for compatible models
+      */
+     supportsTools() {
+         return true;
+     }
+     /**
+      * Provider-specific streaming implementation
+      * Note: This is only used when tools are disabled
+      */
+     async executeStream(options, analysisSchema) {
+         this.validateStreamOptions(options);
+         const startTime = Date.now();
+         const timeout = this.getTimeout(options);
+         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
+         try {
+             const model = await this.getAISDKModel();
+             const result = await streamText({
+                 model,
+                 prompt: options.input.text,
+                 system: options.systemPrompt,
+                 temperature: options.temperature,
+                 maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+                 tools: options.tools,
+                 toolChoice: "auto",
+                 abortSignal: timeoutController?.controller.signal,
+             });
+             timeoutController?.cleanup();
+             // Transform stream to match StreamResult interface
+             const transformedStream = async function* () {
+                 for await (const chunk of result.textStream) {
+                     yield { content: chunk };
+                 }
+             };
+             // Create analytics promise that resolves after stream completion
+             const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+                 requestId: `openai-compatible-stream-${Date.now()}`,
+                 streamingMode: true,
+             });
+             return {
+                 stream: transformedStream(),
+                 provider: this.providerName,
+                 model: this.modelName,
+                 analytics: analyticsPromise,
+                 metadata: {
+                     startTime,
+                     streamId: `openai-compatible-${Date.now()}`,
+                 },
+             };
+         }
+         catch (error) {
+             timeoutController?.cleanup();
+             throw this.handleProviderError(error);
+         }
+     }
+     /**
+      * Get available models from OpenAI Compatible endpoint
+      *
+      * Fetches from the /v1/models endpoint to discover available models.
+      * This is useful for auto-discovery when no model is specified.
+      */
+     async getAvailableModels() {
+         try {
+             const modelsUrl = new URL("/v1/models", this.config.baseURL).toString();
+             logger.debug(`Fetching available models from: ${modelsUrl}`);
+             const response = await fetch(modelsUrl, {
+                 headers: {
+                     Authorization: `Bearer ${this.config.apiKey}`,
+                     "Content-Type": "application/json",
+                 },
+             });
+             if (!response.ok) {
+                 logger.warn(`Models endpoint returned ${response.status}: ${response.statusText}`);
+                 return this.getFallbackModels();
+             }
+             const data = await response.json();
+             if (!data.data || !Array.isArray(data.data)) {
+                 logger.warn("Invalid models response format");
+                 return this.getFallbackModels();
+             }
+             const models = data.data.map((model) => model.id).filter(Boolean);
+             logger.debug(`Discovered ${models.length} models:`, models);
+             return models.length > 0 ? models : this.getFallbackModels();
+         }
+         catch (error) {
+             logger.warn(`Failed to fetch models from OpenAI Compatible endpoint:`, error);
+             return this.getFallbackModels();
+         }
+     }
+     /**
+      * Get the first available model for auto-selection
+      */
+     async getFirstAvailableModel() {
+         const models = await this.getAvailableModels();
+         return models[0] || FALLBACK_OPENAI_COMPATIBLE_MODEL;
+     }
+     /**
+      * Fallback models when discovery fails
+      */
+     getFallbackModels() {
+         return [
+             "gpt-4o",
+             "gpt-4o-mini",
+             "gpt-4-turbo",
+             FALLBACK_OPENAI_COMPATIBLE_MODEL,
+             "claude-3-5-sonnet",
+             "claude-3-haiku",
+             "gemini-pro",
+         ];
+     }
+     // ===================
+     // PRIVATE VALIDATION METHODS
+     // ===================
+     validateStreamOptions(options) {
+         if (!options.input?.text || options.input.text.trim().length === 0) {
+             throw new Error("Input text is required and cannot be empty");
+         }
+     }
+ }
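The provider requires OPENAI_COMPATIBLE_BASE_URL and OPENAI_COMPATIBLE_API_KEY and, when OPENAI_COMPATIBLE_MODEL is unset, auto-discovers a model from the endpoint's /v1/models list. A minimal usage sketch follows; the package-root import path and the placeholder key are assumptions, while the method names and env vars come from the code above:

// Usage sketch under assumptions: the endpoint URL and key are placeholders,
// and importing from the package root is not confirmed by this diff.
import { OpenAICompatibleProvider } from "@juspay/neurolink";

process.env.OPENAI_COMPATIBLE_BASE_URL = "https://api.openrouter.ai/api/v1";
process.env.OPENAI_COMPATIBLE_API_KEY = "your-api-key";
// OPENAI_COMPATIBLE_MODEL left unset, so the provider auto-discovers a model.

const provider = new OpenAICompatibleProvider();
const models = await provider.getAvailableModels(); // GET {baseURL}/v1/models, with fallbacks
const first = await provider.getFirstAvailableModel(); // what auto-discovery would pick
console.log(first, `(${models.length} models discovered)`);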
@@ -91,6 +91,10 @@ export declare function createAzureAPIKeyConfig(): ProviderConfigOptions;
  * Creates Azure OpenAI Endpoint configuration
  */
  export declare function createAzureEndpointConfig(): ProviderConfigOptions;
+ /**
+  * Creates OpenAI Compatible provider configuration
+  */
+ export declare function createOpenAICompatibleConfig(): ProviderConfigOptions;
  /**
  * Creates Google Vertex Project ID configuration
  */
@@ -273,6 +273,26 @@ export function createAzureEndpointConfig() {
        ],
    };
  }
+ /**
+  * Creates OpenAI Compatible provider configuration
+  */
+ export function createOpenAICompatibleConfig() {
+     return {
+         providerName: "OpenAI Compatible",
+         envVarName: "OPENAI_COMPATIBLE_API_KEY",
+         setupUrl: "https://openrouter.ai/",
+         description: "OpenAI-compatible API credentials",
+         instructions: [
+             "1. Set OPENAI_COMPATIBLE_BASE_URL to your endpoint (e.g., https://api.openrouter.ai/api/v1)",
+             "2. Get API key from your OpenAI-compatible service:",
+             "   • OpenRouter: https://openrouter.ai/keys",
+             "   • vLLM: Use any value for local deployments",
+             "   • LiteLLM: Check your LiteLLM server configuration",
+             "3. Set OPENAI_COMPATIBLE_API_KEY to your API key",
+             "4. Optionally set OPENAI_COMPATIBLE_MODEL (will auto-discover if not set)",
+         ],
+     };
+ }
  /**
  * Creates Google Vertex Project ID configuration
  */
@@ -143,6 +143,10 @@ export function hasProviderEnvVars(provider) {
      case "mistral-ai":
      case "mistralai":
        return !!process.env.MISTRAL_API_KEY;
+     case "litellm":
+       // LiteLLM requires a proxy server, which can be checked for availability
+       // Default base URL is assumed, or can be configured via environment
+       return true; // LiteLLM proxy availability will be checked during usage
      default:
        return false;
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@juspay/neurolink",
-   "version": "7.4.0",
+   "version": "7.6.0",
    "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
    "author": {
      "name": "Juspay Technologies",
@@ -252,5 +252,9 @@
      "darwin",
      "linux",
      "win32"
-   ]
+   ],
+   "prettier": {
+     "tabWidth": 2,
+     "useTabs": false
+   }
  }