@juspay/neurolink 4.1.1 → 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -2
- package/README.md +1 -12
- package/dist/cli/commands/mcp.d.ts +11 -0
- package/dist/cli/commands/mcp.js +332 -223
- package/dist/cli/index.js +69 -8
- package/dist/core/factory.js +2 -2
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/lib/core/factory.js +2 -2
- package/dist/lib/index.d.ts +1 -1
- package/dist/lib/index.js +1 -1
- package/dist/lib/mcp/context-manager.d.ts +6 -0
- package/dist/lib/mcp/context-manager.js +8 -0
- package/dist/lib/mcp/contracts/mcpContract.d.ts +1 -0
- package/dist/lib/mcp/external-client.js +6 -2
- package/dist/lib/mcp/initialize.d.ts +2 -1
- package/dist/lib/mcp/initialize.js +8 -7
- package/dist/lib/mcp/orchestrator.js +9 -0
- package/dist/lib/mcp/registry.d.ts +1 -1
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +1 -1
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -3
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +1 -1
- package/dist/lib/mcp/session-manager.js +1 -1
- package/dist/lib/mcp/session-persistence.js +1 -1
- package/dist/lib/mcp/tool-registry.d.ts +31 -11
- package/dist/lib/mcp/tool-registry.js +226 -38
- package/dist/lib/mcp/unified-mcp.d.ts +12 -2
- package/dist/lib/mcp/unified-registry.d.ts +21 -7
- package/dist/lib/mcp/unified-registry.js +179 -17
- package/dist/lib/neurolink.js +17 -25
- package/dist/lib/providers/googleVertexAI.js +19 -1
- package/dist/lib/providers/openAI.js +18 -1
- package/dist/lib/utils/provider-setup-messages.d.ts +8 -0
- package/dist/lib/utils/provider-setup-messages.js +120 -0
- package/dist/lib/utils/provider-validation.d.ts +35 -0
- package/dist/lib/utils/provider-validation.js +625 -0
- package/dist/lib/utils/providerUtils-fixed.js +20 -1
- package/dist/lib/utils/providerUtils.d.ts +2 -2
- package/dist/lib/utils/providerUtils.js +38 -7
- package/dist/lib/utils/timeout-manager.d.ts +75 -0
- package/dist/lib/utils/timeout-manager.js +244 -0
- package/dist/mcp/context-manager.d.ts +6 -0
- package/dist/mcp/context-manager.js +8 -0
- package/dist/mcp/contracts/mcpContract.d.ts +1 -0
- package/dist/mcp/external-client.js +6 -2
- package/dist/mcp/initialize.d.ts +2 -1
- package/dist/mcp/initialize.js +8 -7
- package/dist/mcp/orchestrator.js +9 -0
- package/dist/mcp/registry.d.ts +1 -1
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +1 -1
- package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -3
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +1 -1
- package/dist/mcp/session-manager.js +1 -1
- package/dist/mcp/session-persistence.js +1 -1
- package/dist/mcp/tool-registry.d.ts +31 -11
- package/dist/mcp/tool-registry.js +226 -38
- package/dist/mcp/unified-mcp.d.ts +12 -2
- package/dist/mcp/unified-registry.d.ts +21 -7
- package/dist/mcp/unified-registry.js +179 -17
- package/dist/neurolink.js +17 -25
- package/dist/providers/googleVertexAI.js +19 -1
- package/dist/providers/openAI.js +18 -1
- package/dist/utils/provider-setup-messages.d.ts +8 -0
- package/dist/utils/provider-setup-messages.js +120 -0
- package/dist/utils/provider-validation.d.ts +35 -0
- package/dist/utils/provider-validation.js +625 -0
- package/dist/utils/providerUtils-fixed.js +20 -1
- package/dist/utils/providerUtils.d.ts +2 -2
- package/dist/utils/providerUtils.js +38 -7
- package/dist/utils/timeout-manager.d.ts +75 -0
- package/dist/utils/timeout-manager.js +244 -0
- package/package.json +1 -1
package/dist/lib/neurolink.js
CHANGED
@@ -73,6 +73,12 @@ export class NeuroLink {
      * Tools are ENABLED BY DEFAULT for natural AI behavior
      */
     async generateText(options) {
+        // 🔧 FIX: Add input validation
+        if (!options ||
+            typeof options.prompt !== "string" ||
+            options.prompt.trim() === "") {
+            throw new Error("options.prompt is required and must be a non-empty string");
+        }
         // Tools are DEFAULT behavior unless explicitly disabled
         if (options.disableTools === true) {
             return this.generateTextRegular(options);
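The new guard makes a missing or whitespace-only prompt fail fast before any provider is contacted. A minimal consumer-side sketch (the NeuroLink class, the prompt option, and the error text come from this diff; the import path and constructor usage are assumptions):

```js
import { NeuroLink } from "@juspay/neurolink";

const ai = new NeuroLink();
try {
  await ai.generateText({ prompt: "   " }); // whitespace-only prompt
} catch (err) {
  // As of 4.2.0 this is rejected up front:
  // "options.prompt is required and must be a non-empty string"
  console.error(err.message);
}
```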
@@ -101,7 +107,7 @@ export class NeuroLink {
         try {
             mcpLogger.debug(`[${functionTag}] Starting MCP-enabled generation`, {
                 provider: providerName,
-                prompt: options.prompt
+                prompt: (options.prompt?.substring(0, 100) || "No prompt") + "...",
                 contextId: context.sessionId,
             });
             // Get available tools from tool registry (simplified approach)
@@ -205,22 +211,15 @@
             "ollama",
         ];
         const requestedProvider = options.provider === "auto" ? undefined : options.provider;
-        //
-        const localProviders = ["ollama"];
-        // If specific provider requested, check if we should allow fallback
+        // If specific provider requested, only use that provider (no fallback)
         const tryProviders = requestedProvider
-            ?
-                ? [requestedProvider] // No fallback for local providers
-                : [
-                    requestedProvider,
-                    ...providerPriority.filter((p) => p !== requestedProvider),
-                ]
+            ? [requestedProvider] // Only use the requested provider, no fallback
             : providerPriority;
         logger.debug(`[${functionTag}] Starting text generation`, {
             requestedProvider: requestedProvider || "auto",
             tryProviders,
-            allowFallback: !requestedProvider
-            promptLength: options.prompt
+            allowFallback: !requestedProvider,
+            promptLength: options.prompt?.length || 0,
         });
         let lastError = null;
         for (const providerName of tryProviders) {
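The practical effect of the tryProviders change: naming a provider now pins the request to that provider, and only "auto" walks the priority list. A hedged sketch of the difference (option names are taken from this diff; the surrounding call shape is assumed, reusing the ai instance from the earlier sketch):

```js
// Pinned: if OpenAI is misconfigured, this now surfaces OpenAI's error
// instead of silently falling back to another provider.
await ai.generateText({ prompt: "Hello", provider: "openai" });

// Fallback: "auto" still tries providers in priority order until one succeeds.
await ai.generateText({ prompt: "Hello", provider: "auto" });
```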
@@ -228,7 +227,7 @@
             logger.debug(`[${functionTag}] Attempting provider`, {
                 provider: providerName,
             });
-            const provider = await AIProviderFactory.createProvider(providerName, options.model);
+            const provider = await AIProviderFactory.createProvider(providerName, options.model, false);
             const result = await provider.generateText({
                 prompt: options.prompt,
                 model: options.model,
@@ -343,21 +342,14 @@ Note: Tool integration is currently in development. Please provide helpful respo
             "ollama",
         ];
         const requestedProvider = options.provider === "auto" ? undefined : options.provider;
-        //
-        const localProviders = ["ollama"];
-        // If specific provider requested, check if we should allow fallback
+        // If specific provider requested, only use that provider (no fallback)
         const tryProviders = requestedProvider
-            ?
-                ? [requestedProvider] // No fallback for local providers
-                : [
-                    requestedProvider,
-                    ...providerPriority.filter((p) => p !== requestedProvider),
-                ]
+            ? [requestedProvider] // Only use the requested provider, no fallback
             : providerPriority;
         logger.debug(`[${functionTag}] Starting stream generation`, {
             requestedProvider: requestedProvider || "auto",
             tryProviders,
-            allowFallback: !requestedProvider
+            allowFallback: !requestedProvider,
             promptLength: options.prompt.length,
         });
         let lastError = null;
@@ -366,7 +358,7 @@ Note: Tool integration is currently in development. Please provide helpful respo
             logger.debug(`[${functionTag}] Attempting provider`, {
                 provider: providerName,
             });
-            const provider = await AIProviderFactory.createProvider(providerName, options.model);
+            const provider = await AIProviderFactory.createProvider(providerName, options.model, false);
             const result = await provider.streamText({
                 prompt: options.prompt,
                 model: options.model,
@@ -430,7 +422,7 @@ Note: Tool integration is currently in development. Please provide helpful respo
      */
     async testProvider(providerName, testPrompt = "test") {
         try {
-            const provider = await AIProviderFactory.createProvider(providerName);
+            const provider = await AIProviderFactory.createProvider(providerName, null, false); // Disable MCP for simple testing
             await provider.generateText({
                 prompt: testPrompt,
                 enableAnalytics: false,
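Each of the createProvider call sites above now passes a third argument. The diff only shows the literal false being supplied (with a "Disable MCP for simple testing" comment in testProvider), so the parameter name is not visible here. A hedged sketch of the changed call shape, assuming the third positional argument toggles MCP tool integration:

```js
// Assumption: the third argument disables MCP tool wiring for this provider instance.
const provider = await AIProviderFactory.createProvider("openai", "gpt-4o", false);
const result = await provider.generateText({
  prompt: "ping",
  enableAnalytics: false, // option name as seen in the testProvider hunk
});
```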
package/dist/lib/providers/googleVertexAI.js
CHANGED

@@ -35,7 +35,25 @@ const DEFAULT_SYSTEM_CONTEXT = {
 const getGCPVertexBreezeProjectId = () => {
     const projectId = process.env.GOOGLE_VERTEX_PROJECT;
     if (!projectId) {
-
+        // 🔧 FIX: Enhanced error message with setup instructions
+        throw new Error(`❌ VERTEX Provider Configuration Error
+
+Missing required environment variables: GOOGLE_VERTEX_PROJECT
+
+🔧 Step 1: Get Credentials
+Set up Google Cloud project and download service account JSON
+
+💡 Step 2: Add to your .env file (or export in CLI):
+GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"
+GOOGLE_VERTEX_PROJECT="your-gcp-project-id"
+GOOGLE_VERTEX_LOCATION="us-central1"
+# Optional:
+VERTEX_MODEL="gemini-2.5-pro"
+
+🚀 Step 3: Test the setup:
+npx neurolink generate "Hello" --provider vertex
+
+📖 Full setup guide: https://docs.neurolink.ai/providers/vertex`);
     }
     return projectId;
 };
package/dist/lib/providers/openAI.js
CHANGED

@@ -12,7 +12,24 @@ const DEFAULT_SYSTEM_CONTEXT = {
 const getOpenAIApiKey = () => {
     const apiKey = process.env.OPENAI_API_KEY;
     if (!apiKey) {
-
+        // 🔧 FIX: Enhanced error message with setup instructions
+        throw new Error(`❌ OPENAI Provider Configuration Error
+
+Missing required environment variables: OPENAI_API_KEY
+
+🔧 Step 1: Get Credentials
+Get your API key from https://platform.openai.com/api-keys
+
+💡 Step 2: Add to your .env file (or export in CLI):
+OPENAI_API_KEY="sk-proj-your-openai-api-key"
+# Optional:
+OPENAI_MODEL="gpt-4o"
+OPENAI_BASE_URL="https://api.openai.com"
+
+🚀 Step 3: Test the setup:
+npx neurolink generate "Hello" --provider openai
+
+📖 Full setup guide: https://docs.neurolink.ai/providers/openai`);
     }
     return apiKey;
 };
package/dist/lib/utils/provider-setup-messages.d.ts
ADDED

@@ -0,0 +1,8 @@
+/**
+ * Enhanced Provider Setup Messages
+ * Provides detailed setup instructions for AI providers
+ */
+/**
+ * Generate enhanced error message with setup instructions
+ */
+export declare function getProviderSetupMessage(provider: string, missingVars: string[]): string;
package/dist/lib/utils/provider-setup-messages.js
ADDED

@@ -0,0 +1,120 @@
+/**
+ * Enhanced Provider Setup Messages
+ * Provides detailed setup instructions for AI providers
+ */
+/**
+ * Generate enhanced error message with setup instructions
+ */
+export function getProviderSetupMessage(provider, missingVars) {
+    const providerSetup = {
+        openai: {
+            guide: "Get your API key from https://platform.openai.com/api-keys",
+            envVars: [
+                'OPENAI_API_KEY="sk-proj-your-openai-api-key"',
+                "# Optional:",
+                'OPENAI_MODEL="gpt-4o"',
+                'OPENAI_BASE_URL="https://api.openai.com"',
+            ],
+        },
+        anthropic: {
+            guide: "Get your API key from https://console.anthropic.com/",
+            envVars: [
+                'ANTHROPIC_API_KEY="sk-ant-api03-your-anthropic-key"',
+                "# Optional:",
+                'ANTHROPIC_MODEL="claude-3-5-sonnet-20241022"',
+            ],
+        },
+        "google-ai": {
+            guide: "Get your API key from https://aistudio.google.com/app/apikey",
+            envVars: [
+                'GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"',
+                "# Optional:",
+                'GOOGLE_AI_MODEL="gemini-2.5-pro"',
+            ],
+        },
+        vertex: {
+            guide: "Set up Google Cloud project and download service account JSON",
+            envVars: [
+                'GOOGLE_APPLICATION_CREDENTIALS="/path/to/service-account.json"',
+                'GOOGLE_VERTEX_PROJECT="your-gcp-project-id"',
+                'GOOGLE_VERTEX_LOCATION="us-central1"',
+                "# Optional:",
+                'VERTEX_MODEL="gemini-2.5-pro"',
+            ],
+        },
+        bedrock: {
+            guide: "Set up AWS credentials and request model access in Bedrock console",
+            envVars: [
+                'AWS_ACCESS_KEY_ID="AKIA..."',
+                'AWS_SECRET_ACCESS_KEY="your-aws-secret-key"',
+                'AWS_REGION="us-east-1"',
+                "# Use full inference profile ARN for Anthropic models:",
+                'BEDROCK_MODEL="arn:aws:bedrock:us-east-1:123456789:inference-profile/us.anthropic.claude-3-5-sonnet-20241022-v2:0"',
+                "# Or simple name for Amazon models:",
+                '# BEDROCK_MODEL="amazon.titan-text-express-v1"',
+            ],
+        },
+        azure: {
+            guide: "Set up Azure OpenAI resource and create deployment",
+            envVars: [
+                'AZURE_OPENAI_API_KEY="your-azure-openai-key"',
+                'AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"',
+                'AZURE_OPENAI_DEPLOYMENT_ID="your-deployment-name"',
+                "# Optional:",
+                'AZURE_MODEL="gpt-4o"',
+                'AZURE_API_VERSION="2024-02-15-preview"',
+            ],
+        },
+        huggingface: {
+            guide: "Get your API token from https://huggingface.co/settings/tokens",
+            envVars: [
+                'HUGGINGFACE_API_KEY="hf_your_huggingface_token"',
+                "# Optional:",
+                'HUGGINGFACE_MODEL="microsoft/DialoGPT-medium"',
+                'HUGGINGFACE_ENDPOINT="https://api-inference.huggingface.co"',
+            ],
+        },
+        mistral: {
+            guide: "Get your API key from https://mistral.ai/platform",
+            envVars: [
+                'MISTRAL_API_KEY="your_mistral_api_key"',
+                "# Optional:",
+                'MISTRAL_MODEL="mistral-small"',
+                'MISTRAL_ENDPOINT="https://api.mistral.ai"',
+            ],
+        },
+        ollama: {
+            guide: "Install Ollama and pull models locally",
+            envVars: [
+                "# Ollama runs locally - no API key needed",
+                'OLLAMA_BASE_URL="http://localhost:11434"',
+                'OLLAMA_MODEL="llama2"',
+                "",
+                "# First install and start Ollama:",
+                "# macOS: brew install ollama",
+                "# Linux: curl -fsSL https://ollama.ai/install.sh | sh",
+                "# Then pull a model: ollama pull llama2",
+            ],
+        },
+    };
+    const setup = providerSetup[provider];
+    if (!setup) {
+        return `❌ ${provider.toUpperCase()} Provider Configuration Error\nMissing variables: ${missingVars.join(", ")}\nCheck provider documentation for setup instructions.`;
+    }
+    return `
+❌ ${provider.toUpperCase()} Provider Configuration Error
+
+Missing required environment variables: ${missingVars.join(", ")}
+
+🔧 Step 1: Get Credentials
+${setup.guide}
+
+💡 Step 2: Add to your .env file (or export in CLI):
+${setup.envVars.join("\n")}
+
+🚀 Step 3: Test the setup:
+npx neurolink generate "Hello" --provider ${provider}
+
+📖 Full setup guide: https://docs.neurolink.ai/providers/${provider}
+`;
+}
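A short usage sketch of the new helper; its behavior follows directly from the implementation above, but the deep dist import path is an assumption and may not be a supported entry point:

```js
import { getProviderSetupMessage } from "@juspay/neurolink/dist/lib/utils/provider-setup-messages.js";

// Prints the three-step setup guide for a provider with missing credentials.
console.log(getProviderSetupMessage("vertex", ["GOOGLE_VERTEX_PROJECT"]));

// Unknown providers get a generic one-line fallback message.
console.log(getProviderSetupMessage("my-custom-provider", ["SOME_KEY"]));
```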
package/dist/lib/utils/provider-validation.d.ts
ADDED

@@ -0,0 +1,35 @@
+/**
+ * Enhanced Provider Validation Utilities
+ *
+ * Fixes false positives in provider status checking by implementing:
+ * - API key format validation
+ * - Lightweight authentication checks
+ * - Proper error classification
+ * - Rate-limit friendly validation
+ */
+export interface ProviderValidationResult {
+    configured: boolean;
+    formatValid: boolean;
+    authenticated: boolean;
+    available: boolean;
+    error?: string;
+    errorType?: "config" | "format" | "auth" | "network" | "quota" | "unknown";
+    responseTime?: number;
+    details?: Record<string, any>;
+}
+/**
+ * Validate API key format for a specific provider
+ */
+export declare function validateApiKeyFormat(provider: string, apiKey: string): boolean;
+/**
+ * Comprehensive provider validation that prevents false positives
+ */
+export declare function validateProvider(provider: string): Promise<ProviderValidationResult>;
+/**
+ * Batch validate multiple providers efficiently
+ */
+export declare function validateProviders(providers: string[]): Promise<Record<string, ProviderValidationResult>>;
+/**
+ * Check if provider validation should be cached (to avoid rate limits)
+ */
+export declare function shouldCacheValidation(result: ProviderValidationResult): boolean;
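A hedged sketch of consuming the new validation API; only the names and result fields declared in the .d.ts above are used, and the deep dist import path is again an assumption:

```js
import {
  validateProvider,
  validateProviders,
} from "@juspay/neurolink/dist/lib/utils/provider-validation.js";

const result = await validateProvider("openai");
if (!result.available) {
  // errorType separates a bad key ("format"/"auth") from transient issues ("network"/"quota").
  console.error(`openai unavailable (${result.errorType}): ${result.error}`);
}

// Batch form: one ProviderValidationResult per provider name.
const statuses = await validateProviders(["openai", "vertex", "ollama"]);
```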