@juspay/neurolink 1.2.4 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/CHANGELOG.md +170 -0
  2. package/README.md +96 -232
  3. package/dist/cli/commands/config.d.ts +403 -0
  4. package/dist/cli/commands/config.js +567 -0
  5. package/dist/cli/commands/mcp.d.ts +7 -0
  6. package/dist/cli/commands/mcp.js +434 -0
  7. package/dist/cli/index.d.ts +9 -0
  8. package/dist/cli/index.js +16 -9
  9. package/dist/core/factory.js +6 -2
  10. package/dist/core/types.d.ts +12 -2
  11. package/dist/core/types.js +11 -0
  12. package/dist/mcp/context-manager.d.ts +164 -0
  13. package/dist/mcp/context-manager.js +273 -0
  14. package/dist/mcp/factory.d.ts +144 -0
  15. package/dist/mcp/factory.js +141 -0
  16. package/dist/mcp/orchestrator.d.ts +170 -0
  17. package/dist/mcp/orchestrator.js +372 -0
  18. package/dist/mcp/registry.d.ts +188 -0
  19. package/dist/mcp/registry.js +373 -0
  20. package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
  21. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
  22. package/dist/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
  23. package/dist/mcp/servers/ai-providers/ai-core-server.js +302 -0
  24. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
  25. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +430 -0
  26. package/dist/neurolink.d.ts +4 -4
  27. package/dist/neurolink.js +109 -56
  28. package/dist/providers/googleAIStudio.d.ts +30 -0
  29. package/dist/providers/googleAIStudio.js +215 -0
  30. package/dist/providers/googleVertexAI.js +2 -2
  31. package/dist/providers/index.d.ts +2 -0
  32. package/dist/providers/index.js +3 -1
  33. package/dist/providers/openAI.js +2 -2
  34. package/dist/utils/providerUtils.js +11 -2
  35. package/package.json +78 -6
package/dist/providers/googleAIStudio.d.ts ADDED
@@ -0,0 +1,30 @@
+ import type { ZodType, ZodTypeDef } from 'zod';
+ import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
+ import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+ export declare class GoogleAIStudio implements AIProvider {
+ private modelName;
+ /**
+ * Initializes a new instance of GoogleAIStudio
+ * @param modelName - Optional model name to override the default from config
+ */
+ constructor(modelName?: string | null);
+ /**
+ * Gets the appropriate model instance
+ * @private
+ */
+ private getModel;
+ /**
+ * Processes text using streaming approach with enhanced error handling callbacks
+ * @param prompt - The input text prompt to analyze
+ * @param analysisSchema - Optional Zod schema or Schema object for output validation
+ * @returns Promise resolving to StreamTextResult or null if operation fails
+ */
+ streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+ /**
+ * Processes text using non-streaming approach with optional schema validation
+ * @param prompt - The input text prompt to analyze
+ * @param analysisSchema - Optional Zod schema or Schema object for output validation
+ * @returns Promise resolving to GenerateTextResult or null if operation fails
+ */
+ generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ }
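For reference, a minimal usage sketch of the provider declared above. It assumes GoogleAIStudio is re-exported from the package root (the providers index diff below does export it from `./googleAIStudio.js`) and that GOOGLE_AI_API_KEY is set; the import path and output handling are illustrative, not taken from this diff.

```ts
import { GoogleAIStudio } from '@juspay/neurolink'; // assumed re-export; see providers/index diff below

async function main() {
  // Optional model name; otherwise GOOGLE_AI_MODEL or 'gemini-1.5-pro-latest' is used
  const provider = new GoogleAIStudio('gemini-1.5-flash');

  // generateText accepts either a plain prompt string or an options object
  const result = await provider.generateText({
    prompt: 'Summarize the benefits of streaming responses.',
    temperature: 0.5,
    maxTokens: 200,
  });

  console.log(result?.text);
}

main().catch(console.error);
```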
package/dist/providers/googleAIStudio.js ADDED
@@ -0,0 +1,215 @@
+ import { createGoogleGenerativeAI } from '@ai-sdk/google';
+ import { streamText, generateText, Output } from 'ai';
+ // Default system context
+ const DEFAULT_SYSTEM_CONTEXT = {
+ systemPrompt: 'You are a helpful AI assistant.'
+ };
+ // Configuration helpers
+ const getGoogleAIApiKey = () => {
+ const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
+ if (!apiKey) {
+ throw new Error('GOOGLE_AI_API_KEY environment variable is not set');
+ }
+ return apiKey;
+ };
+ const getGoogleAIModelId = () => {
+ return process.env.GOOGLE_AI_MODEL || 'gemini-1.5-pro-latest';
+ };
+ const hasValidAuth = () => {
+ return !!(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
+ };
+ // Lazy initialization cache
+ let _google = null;
+ function getGoogleInstance() {
+ if (!_google) {
+ const apiKey = getGoogleAIApiKey();
+ _google = createGoogleGenerativeAI({
+ apiKey: apiKey,
+ headers: {
+ 'X-Powered-By': 'NeuroLink'
+ }
+ });
+ }
+ return _google;
+ }
+ // Google AI Studio class with enhanced error handling
+ export class GoogleAIStudio {
+ modelName;
+ /**
+ * Initializes a new instance of GoogleAIStudio
+ * @param modelName - Optional model name to override the default from config
+ */
+ constructor(modelName) {
+ const functionTag = 'GoogleAIStudio.constructor';
+ this.modelName = modelName || getGoogleAIModelId();
+ try {
+ console.log(`[${functionTag}] Initialization started`, {
+ modelName: this.modelName,
+ hasApiKey: hasValidAuth()
+ });
+ console.log(`[${functionTag}] Initialization completed`, {
+ modelName: this.modelName,
+ success: true
+ });
+ }
+ catch (err) {
+ console.error(`[${functionTag}] Initialization failed`, {
+ message: 'Error in initializing Google AI Studio',
+ modelName: this.modelName,
+ error: err instanceof Error ? err.message : String(err),
+ stack: err instanceof Error ? err.stack : undefined
+ });
+ }
+ }
+ /**
+ * Gets the appropriate model instance
+ * @private
+ */
+ getModel() {
+ console.log('GoogleAIStudio.getModel - Google AI model selected', {
+ modelName: this.modelName
+ });
+ const google = getGoogleInstance();
+ return google(this.modelName);
+ }
+ /**
+ * Processes text using streaming approach with enhanced error handling callbacks
+ * @param prompt - The input text prompt to analyze
+ * @param analysisSchema - Optional Zod schema or Schema object for output validation
+ * @returns Promise resolving to StreamTextResult or null if operation fails
+ */
+ async streamText(optionsOrPrompt, analysisSchema) {
+ const functionTag = 'GoogleAIStudio.streamText';
+ const provider = 'google-ai';
+ let chunkCount = 0;
+ try {
+ // Parse parameters - support both string and options object
+ const options = typeof optionsOrPrompt === 'string'
+ ? { prompt: optionsOrPrompt }
+ : optionsOrPrompt;
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+ // Use schema from options or fallback parameter
+ const finalSchema = schema || analysisSchema;
+ console.log(`[${functionTag}] Stream request started`, {
+ provider,
+ modelName: this.modelName,
+ promptLength: prompt.length,
+ temperature,
+ maxTokens,
+ hasSchema: !!finalSchema
+ });
+ const model = this.getModel();
+ const streamOptions = {
+ model: model,
+ prompt: prompt,
+ system: systemPrompt,
+ temperature,
+ maxTokens,
+ onError: (event) => {
+ const error = event.error;
+ const errorMessage = error instanceof Error ? error.message : String(error);
+ const errorStack = error instanceof Error ? error.stack : undefined;
+ console.error(`[${functionTag}] Stream text error`, {
+ provider,
+ modelName: this.modelName,
+ error: errorMessage,
+ stack: errorStack,
+ promptLength: prompt.length,
+ chunkCount
+ });
+ },
+ onFinish: (event) => {
+ console.log(`[${functionTag}] Stream text finished`, {
+ provider,
+ modelName: this.modelName,
+ finishReason: event.finishReason,
+ usage: event.usage,
+ totalChunks: chunkCount,
+ promptLength: prompt.length,
+ responseLength: event.text?.length || 0
+ });
+ },
+ onChunk: (event) => {
+ chunkCount++;
+ console.debug(`[${functionTag}] Stream text chunk`, {
+ provider,
+ modelName: this.modelName,
+ chunkNumber: chunkCount,
+ chunkLength: event.chunk.text?.length || 0,
+ chunkType: event.chunk.type
+ });
+ }
+ };
+ if (analysisSchema) {
+ streamOptions.experimental_output = Output.object({ schema: analysisSchema });
+ }
+ const result = streamText(streamOptions);
+ return result;
+ }
+ catch (err) {
+ console.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: 'Error in streaming text',
+ err: String(err),
+ promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
+ });
+ throw err; // Re-throw error to trigger fallback
+ }
+ }
+ /**
+ * Processes text using non-streaming approach with optional schema validation
+ * @param prompt - The input text prompt to analyze
+ * @param analysisSchema - Optional Zod schema or Schema object for output validation
+ * @returns Promise resolving to GenerateTextResult or null if operation fails
+ */
+ async generateText(optionsOrPrompt, analysisSchema) {
+ const functionTag = 'GoogleAIStudio.generateText';
+ const provider = 'google-ai';
+ try {
+ // Parse parameters - support both string and options object
+ const options = typeof optionsOrPrompt === 'string'
+ ? { prompt: optionsOrPrompt }
+ : optionsOrPrompt;
+ const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+ // Use schema from options or fallback parameter
+ const finalSchema = schema || analysisSchema;
+ console.log(`[${functionTag}] Generate request started`, {
+ provider,
+ modelName: this.modelName,
+ promptLength: prompt.length,
+ temperature,
+ maxTokens
+ });
+ const model = this.getModel();
+ const generateOptions = {
+ model: model,
+ prompt: prompt,
+ system: systemPrompt,
+ temperature,
+ maxTokens
+ };
+ if (finalSchema) {
+ generateOptions.experimental_output = Output.object({ schema: finalSchema });
+ }
+ const result = await generateText(generateOptions);
+ console.log(`[${functionTag}] Generate text completed`, {
+ provider,
+ modelName: this.modelName,
+ usage: result.usage,
+ finishReason: result.finishReason,
+ responseLength: result.text?.length || 0
+ });
+ return result;
+ }
+ catch (err) {
+ console.error(`[${functionTag}] Exception`, {
+ provider,
+ modelName: this.modelName,
+ message: 'Error in generating text',
+ err: String(err)
+ });
+ throw err; // Re-throw error to trigger fallback
+ }
+ }
+ }
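A hedged sketch of how the streaming path above might be consumed. The options shape (prompt, systemPrompt, temperature, maxTokens) comes from the destructuring in streamText; iterating result.textStream is standard Vercel AI SDK usage rather than something shown in this diff.

```ts
import { GoogleAIStudio } from '@juspay/neurolink'; // assumed export path

async function streamDemo() {
  const provider = new GoogleAIStudio(); // model resolved from GOOGLE_AI_MODEL or the default

  // streamText resolves to a StreamTextResult; errors are re-thrown so callers can fall back
  const result = await provider.streamText({
    prompt: 'Write a haiku about retries.',
    systemPrompt: 'You are a concise poet.',
    maxTokens: 100,
  });

  if (result) {
    // textStream is the AI SDK's async iterable of text deltas
    for await (const delta of result.textStream) {
      process.stdout.write(delta);
    }
  }
}

streamDemo().catch(console.error);
```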
package/dist/providers/googleVertexAI.js CHANGED
@@ -346,7 +346,7 @@ export class GoogleVertexAI {
  err: String(err),
  promptLength: prompt.length
  });
- return null;
+ throw err; // Re-throw error to trigger fallback
  }
  }
  /**
@@ -402,7 +402,7 @@ export class GoogleVertexAI {
  message: 'Error in generating text',
  err: String(err)
  });
- return null;
+ throw err; // Re-throw error to trigger fallback
  }
  }
  }
package/dist/providers/index.d.ts CHANGED
@@ -7,6 +7,7 @@ export { AmazonBedrock } from './amazonBedrock.js';
  export { OpenAI } from './openAI.js';
  export { AnthropicProvider } from './anthropic.js';
  export { AzureOpenAIProvider } from './azureOpenAI.js';
+ export { GoogleAIStudio } from './googleAIStudio.js';
  export type { AIProvider } from '../core/types.js';
  /**
  * Provider registry for dynamic provider instantiation
@@ -17,6 +18,7 @@ export declare const PROVIDERS: {
  readonly openai: "OpenAI";
  readonly anthropic: "AnthropicProvider";
  readonly azure: "AzureOpenAIProvider";
+ readonly 'google-ai': "GoogleAIStudio";
  };
  /**
  * Type for valid provider names
package/dist/providers/index.js CHANGED
@@ -7,6 +7,7 @@ export { AmazonBedrock } from './amazonBedrock.js';
  export { OpenAI } from './openAI.js';
  export { AnthropicProvider } from './anthropic.js';
  export { AzureOpenAIProvider } from './azureOpenAI.js';
+ export { GoogleAIStudio } from './googleAIStudio.js';
  /**
  * Provider registry for dynamic provider instantiation
  */
@@ -15,7 +16,8 @@ export const PROVIDERS = {
  bedrock: 'AmazonBedrock',
  openai: 'OpenAI',
  anthropic: 'AnthropicProvider',
- azure: 'AzureOpenAIProvider'
+ azure: 'AzureOpenAIProvider',
+ 'google-ai': 'GoogleAIStudio'
  };
  /**
  * List of all available provider names
package/dist/providers/openAI.js CHANGED
@@ -114,7 +114,7 @@ export class OpenAI {
  message: 'Error in streaming text',
  err: String(err)
  });
- return null;
+ throw err; // Re-throw error to trigger fallback
  }
  }
  async generateText(optionsOrPrompt, analysisSchema) {
@@ -162,7 +162,7 @@ export class OpenAI {
  message: 'Error in generating text',
  err: String(err)
  });
- return null;
+ throw err; // Re-throw error to trigger fallback
  }
  }
  }
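Across the Vertex AI, OpenAI, and Google AI Studio providers, errors are now re-thrown instead of swallowed as null so that a caller can catch and fall back to another provider. A minimal sketch of the fallback pattern this enables; the generateWithFallback helper and the no-argument constructors are illustrative assumptions, not part of the package.

```ts
import { OpenAI, GoogleAIStudio } from '@juspay/neurolink'; // assumed root re-exports

// Hypothetical helper: try providers in order, moving on when one throws
async function generateWithFallback(prompt: string) {
  const candidates = [() => new OpenAI(), () => new GoogleAIStudio()];
  for (const make of candidates) {
    try {
      const result = await make().generateText(prompt);
      if (result) return result.text;
    } catch {
      // Provider threw (e.g. missing API key or API error); try the next one
      continue;
    }
  }
  throw new Error('All providers failed');
}
```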
package/dist/utils/providerUtils.js CHANGED
@@ -12,7 +12,7 @@ export function getBestProvider(requestedProvider) {
  return requestedProvider;
  }
  // Default fallback order based on environment variables - OpenAI first since it's most reliable
- const providers = ['openai', 'vertex', 'bedrock'];
+ const providers = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
  // Check which providers have their required environment variables
  for (const provider of providers) {
  if (isProviderConfigured(provider)) {
@@ -42,6 +42,15 @@ function isProviderConfigured(provider) {
  case 'openai':
  case 'gpt':
  return !!process.env.OPENAI_API_KEY;
+ case 'anthropic':
+ case 'claude':
+ return !!process.env.ANTHROPIC_API_KEY;
+ case 'azure':
+ case 'azure-openai':
+ return !!process.env.AZURE_OPENAI_API_KEY;
+ case 'google-ai':
+ case 'google-studio':
+ return !!(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
  default:
  return false;
  }
@@ -51,7 +60,7 @@ function isProviderConfigured(provider) {
  * @returns Array of available provider names
  */
  export function getAvailableProviders() {
- return ['bedrock', 'vertex', 'openai'];
+ return ['bedrock', 'vertex', 'openai', 'anthropic', 'azure', 'google-ai'];
  }
  /**
  * Validate provider name
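The detection above is driven purely by environment variables, so setting a Google AI Studio key is enough for the provider to be picked up in the fallback order. A brief sketch, assuming getBestProvider and getAvailableProviders are reachable from the package's public API (only their dist location is shown in this diff).

```ts
import { getBestProvider, getAvailableProviders } from '@juspay/neurolink'; // assumed re-export

process.env.GOOGLE_AI_API_KEY = 'test-key'; // normally set in .env, shown inline for illustration

// With no explicit provider requested, the first configured entry in
// ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'] wins
console.log(getBestProvider()); // 'google-ai' when only the Google key is set

// The static list of supported provider names
console.log(getAvailableProviders());
// ['bedrock', 'vertex', 'openai', 'anthropic', 'azure', 'google-ai']
```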
package/package.json CHANGED
@@ -1,13 +1,38 @@
  {
  "name": "@juspay/neurolink",
- "version": "1.2.4",
- "description": "AI toolkit with multi-provider support for OpenAI, Amazon Bedrock, and Google Vertex AI",
- "author": "Juspay Technologies",
+ "version": "1.5.0",
+ "description": "Universal AI Development Platform with external MCP server integration, multi-provider support, and professional CLI. Connect to 65+ MCP servers for filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with OpenAI, Anthropic, Google Vertex AI, and AWS Bedrock.",
+ "author": {
+ "name": "Juspay Technologies",
+ "email": "support@juspay.in",
+ "url": "https://juspay.io"
+ },
  "license": "MIT",
+ "homepage": "https://github.com/juspay/neurolink#readme",
+ "repository": {
+ "type": "git",
+ "url": "git+https://github.com/juspay/neurolink.git"
+ },
+ "bugs": {
+ "url": "https://github.com/juspay/neurolink/issues"
+ },
+ "funding": {
+ "type": "individual",
+ "url": "https://github.com/sponsors/juspay"
+ },
+ "engines": {
+ "node": ">=18.0.0",
+ "npm": ">=8.0.0",
+ "pnpm": ">=8.0.0"
+ },
  "files": [
  "dist",
  "!dist/**/*.test.*",
- "!dist/**/*.spec.*"
+ "!dist/**/*.spec.*",
+ "!dist/**/*.map",
+ "README.md",
+ "CHANGELOG.md",
+ "LICENSE"
  ],
  "sideEffects": [
  "**/*.css"
@@ -25,6 +50,12 @@
  "svelte": "./dist/index.js",
  "import": "./dist/index.js",
  "default": "./dist/index.js"
+ },
+ "./package.json": "./package.json",
+ "./cli": {
+ "types": "./dist/cli/index.d.ts",
+ "import": "./dist/cli/index.js",
+ "default": "./dist/cli/index.js"
  }
  },
  "peerDependencies": {
@@ -34,7 +65,19 @@
  "ai": "^4.0.0",
  "zod": "^3.22.0"
  },
+ "peerDependenciesMeta": {
+ "@ai-sdk/amazon-bedrock": {
+ "optional": true
+ },
+ "@ai-sdk/google-vertex": {
+ "optional": true
+ },
+ "@ai-sdk/openai": {
+ "optional": true
+ }
+ },
  "dependencies": {
+ "@ai-sdk/google": "^1.2.19",
  "chalk": "^5.3.0",
  "dotenv": "^16.5.0",
  "inquirer": "^9.2.15",
@@ -55,6 +98,7 @@
  "eslint": "^9.0.0",
  "prettier": "^3.0.0",
  "publint": "^0.3.2",
+ "puppeteer": "^24.10.0",
  "svelte": "^5.0.0",
  "svelte-check": "^4.0.0",
  "tslib": "^2.4.1",
@@ -65,6 +109,11 @@
  "keywords": [
  "ai",
  "llm",
+ "mcp",
+ "model-context-protocol",
+ "lighthouse",
+ "tool-orchestration",
+ "ai-platform",
  "openai",
  "anthropic",
  "google",
@@ -74,12 +123,35 @@
  "tools",
  "neurolink",
  "juspay",
- "svelte"
+ "svelte",
+ "chatgpt",
+ "gpt-4",
+ "claude",
+ "gemini",
+ "ai-sdk",
+ "typescript",
+ "cli-tool",
+ "developer-tools",
+ "automation",
+ "machine-learning",
+ "artificial-intelligence",
+ "multi-provider",
+ "ai-agents",
+ "prompt-engineering",
+ "ai-workflow",
+ "universal-ai",
+ "ai-development",
+ "llm-integration"
+ ],
+ "os": [
+ "darwin",
+ "linux",
+ "win32"
  ],
  "scripts": {
  "dev": "vite dev",
  "build": "vite build && pnpm run prepack",
- "build:cli": "echo 'Building CLI...' && tsc src/cli/index.ts --outDir dist/cli --target es2022 --module esnext --moduleResolution bundler --allowImportingTsExtensions false --resolveJsonModule --esModuleInterop --allowSyntheticDefaultImports --strict --rootDir src/cli",
+ "build:cli": "echo 'Building CLI...' && tsc --project tsconfig.cli.json",
  "cli": "node dist/cli/index.js",
  "preview": "vite preview",
  "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",