@juspay/neurolink 1.3.0 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +62 -0
- package/README.md +96 -285
- package/dist/cli/commands/config.d.ts +33 -3
- package/dist/cli/commands/config.js +36 -1
- package/dist/cli/index.js +11 -9
- package/dist/core/factory.js +6 -2
- package/dist/core/types.d.ts +12 -2
- package/dist/core/types.js +11 -0
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.d.ts +1 -1
- package/dist/mcp/servers/ai-providers/ai-core-server.js +28 -6
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +430 -0
- package/dist/neurolink.d.ts +4 -4
- package/dist/neurolink.js +109 -56
- package/dist/providers/googleAIStudio.d.ts +30 -0
- package/dist/providers/googleAIStudio.js +215 -0
- package/dist/providers/googleVertexAI.js +2 -2
- package/dist/providers/index.d.ts +2 -0
- package/dist/providers/index.js +3 -1
- package/dist/providers/openAI.js +2 -2
- package/dist/utils/providerUtils.js +5 -2
- package/package.json +3 -1
package/dist/providers/googleAIStudio.d.ts
ADDED

@@ -0,0 +1,30 @@
+import type { ZodType, ZodTypeDef } from 'zod';
+import { type StreamTextResult, type ToolSet, type Schema, type GenerateTextResult } from 'ai';
+import type { AIProvider, TextGenerationOptions, StreamTextOptions } from '../core/types.js';
+export declare class GoogleAIStudio implements AIProvider {
+    private modelName;
+    /**
+     * Initializes a new instance of GoogleAIStudio
+     * @param modelName - Optional model name to override the default from config
+     */
+    constructor(modelName?: string | null);
+    /**
+     * Gets the appropriate model instance
+     * @private
+     */
+    private getModel;
+    /**
+     * Processes text using streaming approach with enhanced error handling callbacks
+     * @param prompt - The input text prompt to analyze
+     * @param analysisSchema - Optional Zod schema or Schema object for output validation
+     * @returns Promise resolving to StreamTextResult or null if operation fails
+     */
+    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
+    /**
+     * Processes text using non-streaming approach with optional schema validation
+     * @param prompt - The input text prompt to analyze
+     * @param analysisSchema - Optional Zod schema or Schema object for output validation
+     * @returns Promise resolving to GenerateTextResult or null if operation fails
+     */
+    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+}
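For orientation, a minimal consumer sketch against the declaration above. The relative import path mirrors the one used inside dist/providers and the model name is an assumption; the option names (prompt, temperature, maxTokens) come from the implementation below.

// Hypothetical consumer of the new provider; import path and model name are assumptions.
import { GoogleAIStudio } from './googleAIStudio.js';

async function summarize(text: string): Promise<string | null> {
    // The constructor accepts an optional model name; omitted, it falls back to
    // GOOGLE_AI_MODEL or the built-in default.
    const provider = new GoogleAIStudio('gemini-1.5-flash');

    // Both a bare prompt string and an options object are accepted.
    const result = await provider.generateText({
        prompt: `Summarize in one sentence:\n${text}`,
        temperature: 0.3,
        maxTokens: 200,
    });

    return result?.text ?? null;
}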
package/dist/providers/googleAIStudio.js
ADDED

@@ -0,0 +1,215 @@
+import { createGoogleGenerativeAI } from '@ai-sdk/google';
+import { streamText, generateText, Output } from 'ai';
+// Default system context
+const DEFAULT_SYSTEM_CONTEXT = {
+    systemPrompt: 'You are a helpful AI assistant.'
+};
+// Configuration helpers
+const getGoogleAIApiKey = () => {
+    const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
+    if (!apiKey) {
+        throw new Error('GOOGLE_AI_API_KEY environment variable is not set');
+    }
+    return apiKey;
+};
+const getGoogleAIModelId = () => {
+    return process.env.GOOGLE_AI_MODEL || 'gemini-1.5-pro-latest';
+};
+const hasValidAuth = () => {
+    return !!(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
+};
+// Lazy initialization cache
+let _google = null;
+function getGoogleInstance() {
+    if (!_google) {
+        const apiKey = getGoogleAIApiKey();
+        _google = createGoogleGenerativeAI({
+            apiKey: apiKey,
+            headers: {
+                'X-Powered-By': 'NeuroLink'
+            }
+        });
+    }
+    return _google;
+}
+// Google AI Studio class with enhanced error handling
+export class GoogleAIStudio {
+    modelName;
+    /**
+     * Initializes a new instance of GoogleAIStudio
+     * @param modelName - Optional model name to override the default from config
+     */
+    constructor(modelName) {
+        const functionTag = 'GoogleAIStudio.constructor';
+        this.modelName = modelName || getGoogleAIModelId();
+        try {
+            console.log(`[${functionTag}] Initialization started`, {
+                modelName: this.modelName,
+                hasApiKey: hasValidAuth()
+            });
+            console.log(`[${functionTag}] Initialization completed`, {
+                modelName: this.modelName,
+                success: true
+            });
+        }
+        catch (err) {
+            console.error(`[${functionTag}] Initialization failed`, {
+                message: 'Error in initializing Google AI Studio',
+                modelName: this.modelName,
+                error: err instanceof Error ? err.message : String(err),
+                stack: err instanceof Error ? err.stack : undefined
+            });
+        }
+    }
+    /**
+     * Gets the appropriate model instance
+     * @private
+     */
+    getModel() {
+        console.log('GoogleAIStudio.getModel - Google AI model selected', {
+            modelName: this.modelName
+        });
+        const google = getGoogleInstance();
+        return google(this.modelName);
+    }
+    /**
+     * Processes text using streaming approach with enhanced error handling callbacks
+     * @param prompt - The input text prompt to analyze
+     * @param analysisSchema - Optional Zod schema or Schema object for output validation
+     * @returns Promise resolving to StreamTextResult or null if operation fails
+     */
+    async streamText(optionsOrPrompt, analysisSchema) {
+        const functionTag = 'GoogleAIStudio.streamText';
+        const provider = 'google-ai';
+        let chunkCount = 0;
+        try {
+            // Parse parameters - support both string and options object
+            const options = typeof optionsOrPrompt === 'string'
+                ? { prompt: optionsOrPrompt }
+                : optionsOrPrompt;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            // Use schema from options or fallback parameter
+            const finalSchema = schema || analysisSchema;
+            console.log(`[${functionTag}] Stream request started`, {
+                provider,
+                modelName: this.modelName,
+                promptLength: prompt.length,
+                temperature,
+                maxTokens,
+                hasSchema: !!finalSchema
+            });
+            const model = this.getModel();
+            const streamOptions = {
+                model: model,
+                prompt: prompt,
+                system: systemPrompt,
+                temperature,
+                maxTokens,
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    const errorStack = error instanceof Error ? error.stack : undefined;
+                    console.error(`[${functionTag}] Stream text error`, {
+                        provider,
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        stack: errorStack,
+                        promptLength: prompt.length,
+                        chunkCount
+                    });
+                },
+                onFinish: (event) => {
+                    console.log(`[${functionTag}] Stream text finished`, {
+                        provider,
+                        modelName: this.modelName,
+                        finishReason: event.finishReason,
+                        usage: event.usage,
+                        totalChunks: chunkCount,
+                        promptLength: prompt.length,
+                        responseLength: event.text?.length || 0
+                    });
+                },
+                onChunk: (event) => {
+                    chunkCount++;
+                    console.debug(`[${functionTag}] Stream text chunk`, {
+                        provider,
+                        modelName: this.modelName,
+                        chunkNumber: chunkCount,
+                        chunkLength: event.chunk.text?.length || 0,
+                        chunkType: event.chunk.type
+                    });
+                }
+            };
+            if (analysisSchema) {
+                streamOptions.experimental_output = Output.object({ schema: analysisSchema });
+            }
+            const result = streamText(streamOptions);
+            return result;
+        }
+        catch (err) {
+            console.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: 'Error in streaming text',
+                err: String(err),
+                promptLength: typeof optionsOrPrompt === 'string' ? optionsOrPrompt.length : optionsOrPrompt.prompt.length
+            });
+            throw err; // Re-throw error to trigger fallback
+        }
+    }
+    /**
+     * Processes text using non-streaming approach with optional schema validation
+     * @param prompt - The input text prompt to analyze
+     * @param analysisSchema - Optional Zod schema or Schema object for output validation
+     * @returns Promise resolving to GenerateTextResult or null if operation fails
+     */
+    async generateText(optionsOrPrompt, analysisSchema) {
+        const functionTag = 'GoogleAIStudio.generateText';
+        const provider = 'google-ai';
+        try {
+            // Parse parameters - support both string and options object
+            const options = typeof optionsOrPrompt === 'string'
+                ? { prompt: optionsOrPrompt }
+                : optionsOrPrompt;
+            const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
+            // Use schema from options or fallback parameter
+            const finalSchema = schema || analysisSchema;
+            console.log(`[${functionTag}] Generate request started`, {
+                provider,
+                modelName: this.modelName,
+                promptLength: prompt.length,
+                temperature,
+                maxTokens
+            });
+            const model = this.getModel();
+            const generateOptions = {
+                model: model,
+                prompt: prompt,
+                system: systemPrompt,
+                temperature,
+                maxTokens
+            };
+            if (finalSchema) {
+                generateOptions.experimental_output = Output.object({ schema: finalSchema });
+            }
+            const result = await generateText(generateOptions);
+            console.log(`[${functionTag}] Generate text completed`, {
+                provider,
+                modelName: this.modelName,
+                usage: result.usage,
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0
+            });
+            return result;
+        }
+        catch (err) {
+            console.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: 'Error in generating text',
+                err: String(err)
+            });
+            throw err; // Re-throw error to trigger fallback
+        }
+    }
+}
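The schema plumbing above feeds an optional Zod schema into the AI SDK's experimental Output.object helper. A hedged sketch of how a caller might exercise that path follows; the sentiment schema is illustrative and the relative import path is an assumption.

// Illustrative structured-output call; the sentiment schema is an assumption.
import { z } from 'zod';
import { GoogleAIStudio } from './googleAIStudio.js';

const sentimentSchema = z.object({
    sentiment: z.enum(['positive', 'neutral', 'negative']),
    confidence: z.number().min(0).max(1),
});

async function classify(review: string) {
    const provider = new GoogleAIStudio();
    // For generateText, the schema can travel inside the options object ({ schema })
    // or as the second argument; either way it ends up in experimental_output.
    return provider.generateText(
        { prompt: `Classify the sentiment of: ${review}`, maxTokens: 100 },
        sentimentSchema,
    );
}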
package/dist/providers/googleVertexAI.js
CHANGED

@@ -346,7 +346,7 @@ export class GoogleVertexAI {
                 err: String(err),
                 promptLength: prompt.length
             });
-
+            throw err; // Re-throw error to trigger fallback
         }
     }
     /**
@@ -402,7 +402,7 @@ export class GoogleVertexAI {
                 message: 'Error in generating text',
                 err: String(err)
             });
-
+            throw err; // Re-throw error to trigger fallback
         }
     }
 }
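Both hunks above (and the matching openAI.js hunks below) replace a silently swallowed error with a re-throw, so a caller can fall back to another provider. A hedged sketch of that calling pattern; the loop is illustrative, not the package's internal fallback code.

// Illustrative fallback loop enabled by the re-thrown errors; not the package's
// actual implementation.
import type { AIProvider, TextGenerationOptions } from '../core/types.js';

async function generateWithFallback(providers: AIProvider[], options: TextGenerationOptions) {
    let lastError: unknown;
    for (const provider of providers) {
        try {
            // Each provider now re-throws on failure instead of returning silently,
            // which is what lets this loop advance to the next candidate.
            return await provider.generateText(options);
        }
        catch (err) {
            lastError = err;
        }
    }
    throw lastError ?? new Error('No provider succeeded');
}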
package/dist/providers/index.d.ts
CHANGED

@@ -7,6 +7,7 @@ export { AmazonBedrock } from './amazonBedrock.js';
 export { OpenAI } from './openAI.js';
 export { AnthropicProvider } from './anthropic.js';
 export { AzureOpenAIProvider } from './azureOpenAI.js';
+export { GoogleAIStudio } from './googleAIStudio.js';
 export type { AIProvider } from '../core/types.js';
 /**
  * Provider registry for dynamic provider instantiation
@@ -17,6 +18,7 @@ export declare const PROVIDERS: {
     readonly openai: "OpenAI";
     readonly anthropic: "AnthropicProvider";
     readonly azure: "AzureOpenAIProvider";
+    readonly 'google-ai': "GoogleAIStudio";
 };
 /**
  * Type for valid provider names
package/dist/providers/index.js
CHANGED

@@ -7,6 +7,7 @@ export { AmazonBedrock } from './amazonBedrock.js';
 export { OpenAI } from './openAI.js';
 export { AnthropicProvider } from './anthropic.js';
 export { AzureOpenAIProvider } from './azureOpenAI.js';
+export { GoogleAIStudio } from './googleAIStudio.js';
 /**
  * Provider registry for dynamic provider instantiation
  */
@@ -15,7 +16,8 @@ export const PROVIDERS = {
     bedrock: 'AmazonBedrock',
     openai: 'OpenAI',
     anthropic: 'AnthropicProvider',
-    azure: 'AzureOpenAIProvider'
+    azure: 'AzureOpenAIProvider',
+    'google-ai': 'GoogleAIStudio'
 };
 /**
  * List of all available provider names
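The PROVIDERS registry above maps provider names to exported class names for dynamic instantiation. A hedged sketch of the kind of lookup it enables; the createProvider helper here is illustrative, not the package's factory.

// Illustrative dynamic lookup over the PROVIDERS registry; createProvider is a
// sketch, not the package's factory.
import * as providerModule from './index.js';
import { PROVIDERS } from './index.js';

function createProvider(name: keyof typeof PROVIDERS, modelName?: string) {
    const className = PROVIDERS[name]; // e.g. 'google-ai' -> 'GoogleAIStudio'
    const ProviderClass = (providerModule as any)[className];
    if (!ProviderClass) {
        throw new Error(`Provider class not exported: ${className}`);
    }
    return new ProviderClass(modelName);
}

// const googleAI = createProvider('google-ai', 'gemini-1.5-pro-latest');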
package/dist/providers/openAI.js
CHANGED

@@ -114,7 +114,7 @@ export class OpenAI {
                 message: 'Error in streaming text',
                 err: String(err)
             });
-
+            throw err; // Re-throw error to trigger fallback
         }
     }
     async generateText(optionsOrPrompt, analysisSchema) {
@@ -162,7 +162,7 @@ export class OpenAI {
                 message: 'Error in generating text',
                 err: String(err)
             });
-
+            throw err; // Re-throw error to trigger fallback
         }
     }
 }
package/dist/utils/providerUtils.js
CHANGED

@@ -12,7 +12,7 @@ export function getBestProvider(requestedProvider) {
         return requestedProvider;
     }
     // Default fallback order based on environment variables - OpenAI first since it's most reliable
-    const providers = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure'];
+    const providers = ['openai', 'vertex', 'bedrock', 'anthropic', 'azure', 'google-ai'];
     // Check which providers have their required environment variables
     for (const provider of providers) {
         if (isProviderConfigured(provider)) {
@@ -48,6 +48,9 @@ function isProviderConfigured(provider) {
         case 'azure':
         case 'azure-openai':
            return !!process.env.AZURE_OPENAI_API_KEY;
+        case 'google-ai':
+        case 'google-studio':
+            return !!(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
         default:
             return false;
     }
@@ -57,7 +60,7 @@ function isProviderConfigured(provider) {
  * @returns Array of available provider names
  */
 export function getAvailableProviders() {
-    return ['bedrock', 'vertex', 'openai', 'anthropic', 'azure'];
+    return ['bedrock', 'vertex', 'openai', 'anthropic', 'azure', 'google-ai'];
 }
 /**
  * Validate provider name
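With these changes, getBestProvider and getAvailableProviders recognize google-ai whenever GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY is set. A hedged sketch of leaning on that detection; the snippet is illustrative and the import path is an assumption.

// Illustrative provider selection built on the helpers changed above.
import { getBestProvider, getAvailableProviders } from './providerUtils.js';

// Static list of known providers, which now includes 'google-ai'.
console.log(`Known providers: ${getAvailableProviders().join(', ')}`);

// 'google-ai' (alias 'google-studio') counts as configured when either
// GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY is set, so it can be
// requested directly or reached through the default fallback order.
const providerName = getBestProvider('google-ai');
console.log(`Selected provider: ${providerName}`);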
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "1.3.0",
+  "version": "1.5.0",
   "description": "Universal AI Development Platform with external MCP server integration, multi-provider support, and professional CLI. Connect to 65+ MCP servers for filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with OpenAI, Anthropic, Google Vertex AI, and AWS Bedrock.",
   "author": {
     "name": "Juspay Technologies",
@@ -77,6 +77,7 @@
     }
   },
   "dependencies": {
+    "@ai-sdk/google": "^1.2.19",
     "chalk": "^5.3.0",
     "dotenv": "^16.5.0",
     "inquirer": "^9.2.15",
@@ -97,6 +98,7 @@
     "eslint": "^9.0.0",
     "prettier": "^3.0.0",
     "publint": "^0.3.2",
+    "puppeteer": "^24.10.0",
     "svelte": "^5.0.0",
     "svelte-check": "^4.0.0",
     "tslib": "^2.4.1",