@juspay/neurolink 1.5.1 → 1.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +49 -0
- package/README.md +1 -1
- package/dist/cli/commands/config.d.ts +35 -35
- package/dist/cli/index.js +63 -19
- package/dist/core/factory.js +12 -11
- package/dist/lib/core/factory.d.ts +40 -0
- package/dist/lib/core/factory.js +162 -0
- package/dist/lib/core/types.d.ts +111 -0
- package/dist/lib/core/types.js +68 -0
- package/dist/lib/index.d.ts +56 -0
- package/dist/lib/index.js +62 -0
- package/dist/lib/mcp/context-manager.d.ts +164 -0
- package/dist/lib/mcp/context-manager.js +273 -0
- package/dist/lib/mcp/factory.d.ts +144 -0
- package/dist/lib/mcp/factory.js +141 -0
- package/dist/lib/mcp/orchestrator.d.ts +170 -0
- package/dist/lib/mcp/orchestrator.js +372 -0
- package/dist/lib/mcp/registry.d.ts +188 -0
- package/dist/lib/mcp/registry.js +373 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.d.ts +21 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +215 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +303 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +101 -0
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +428 -0
- package/dist/lib/neurolink.d.ts +53 -0
- package/dist/lib/neurolink.js +155 -0
- package/dist/lib/providers/amazonBedrock.d.ts +11 -0
- package/dist/lib/providers/amazonBedrock.js +256 -0
- package/dist/lib/providers/anthropic.d.ts +34 -0
- package/dist/lib/providers/anthropic.js +308 -0
- package/dist/lib/providers/azureOpenAI.d.ts +37 -0
- package/dist/lib/providers/azureOpenAI.js +339 -0
- package/dist/lib/providers/googleAIStudio.d.ts +30 -0
- package/dist/lib/providers/googleAIStudio.js +216 -0
- package/dist/lib/providers/googleVertexAI.d.ts +30 -0
- package/dist/lib/providers/googleVertexAI.js +409 -0
- package/dist/lib/providers/index.d.ts +30 -0
- package/dist/lib/providers/index.js +25 -0
- package/dist/lib/providers/openAI.d.ts +10 -0
- package/dist/lib/providers/openAI.js +169 -0
- package/dist/lib/utils/logger.d.ts +12 -0
- package/dist/lib/utils/logger.js +25 -0
- package/dist/lib/utils/providerUtils.d.ts +17 -0
- package/dist/lib/utils/providerUtils.js +73 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +11 -10
- package/dist/neurolink.js +13 -12
- package/dist/providers/amazonBedrock.js +22 -21
- package/dist/providers/anthropic.js +21 -20
- package/dist/providers/azureOpenAI.js +21 -20
- package/dist/providers/googleAIStudio.js +13 -12
- package/dist/providers/googleVertexAI.js +27 -26
- package/dist/providers/openAI.js +12 -11
- package/dist/utils/logger.d.ts +12 -0
- package/dist/utils/logger.js +25 -0
- package/dist/utils/providerUtils.d.ts +0 -3
- package/dist/utils/providerUtils.js +3 -2
- package/package.json +1 -1
package/dist/providers/azureOpenAI.js
CHANGED

(Removed lines appear as bare `-` markers in the hunks below; their content was not preserved in the source diff view.)

```diff
@@ -5,6 +5,7 @@
  * Supports all OpenAI models with enhanced security and compliance.
  */
 import { AIProviderName } from '../core/types.js';
+import { logger } from '../utils/logger.js';
 export class AzureOpenAIProvider {
     name = AIProviderName.AZURE;
     apiKey;
@@ -16,7 +17,7 @@ export class AzureOpenAIProvider {
         this.endpoint = this.getEndpoint();
         this.deploymentId = this.getDeploymentId();
         this.apiVersion = process.env.AZURE_OPENAI_API_VERSION || '2024-02-15-preview';
-
+        logger.debug(`[AzureOpenAIProvider] Initialized with endpoint: ${this.endpoint}, deployment: ${this.deploymentId}`);
     }
     getApiKey() {
         const apiKey = process.env.AZURE_OPENAI_API_KEY;
@@ -48,8 +49,8 @@
             'Content-Type': 'application/json',
             'api-key': this.apiKey
         };
-
-
+        logger.debug(`[AzureOpenAIProvider.makeRequest] ${stream ? 'Streaming' : 'Non-streaming'} request to deployment: ${this.deploymentId}`);
+        logger.debug(`[AzureOpenAIProvider.makeRequest] Max tokens: ${body.max_tokens || 'default'}, Temperature: ${body.temperature || 'default'}`);
         const response = await fetch(url, {
             method: 'POST',
             headers,
@@ -57,19 +58,19 @@
         });
         if (!response.ok) {
             const errorText = await response.text();
-
+            logger.error(`[AzureOpenAIProvider.makeRequest] API error ${response.status}: ${errorText}`);
             throw new Error(`Azure OpenAI API error ${response.status}: ${errorText}`);
         }
         return response;
     }
     async generateText(optionsOrPrompt, schema) {
-
+        logger.debug('[AzureOpenAIProvider.generateText] Starting text generation');
         // Parse parameters with backward compatibility
         const options = typeof optionsOrPrompt === 'string'
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
-
+        logger.debug(`[AzureOpenAIProvider.generateText] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
@@ -89,7 +90,7 @@
         try {
             const response = await this.makeRequest(requestBody);
             const data = await response.json();
-
+            logger.debug(`[AzureOpenAIProvider.generateText] Success. Generated ${data.usage.completion_tokens} tokens`);
             const content = data.choices[0]?.message?.content || '';
             return {
                 content,
@@ -104,18 +105,18 @@
             };
         }
         catch (error) {
-
+            logger.error('[AzureOpenAIProvider.generateText] Error:', error);
             throw error;
         }
     }
     async streamText(optionsOrPrompt, schema) {
-
+        logger.debug('[AzureOpenAIProvider.streamText] Starting text streaming');
         // Parse parameters with backward compatibility
         const options = typeof optionsOrPrompt === 'string'
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
-
+        logger.debug(`[AzureOpenAIProvider.streamText] Streaming prompt: "${prompt.substring(0, 100)}..."`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
@@ -147,7 +148,7 @@
             };
         }
         catch (error) {
-
+            logger.error('[AzureOpenAIProvider.streamText] Error:', error);
             throw error;
         }
     }
@@ -178,7 +179,7 @@
                 }
             }
             catch (parseError) {
-
+                logger.warn('[AzureOpenAIProvider.createAsyncIterable] Failed to parse chunk:', parseError);
                 continue;
             }
         }
@@ -190,13 +191,13 @@
         }
     }
     async *generateTextStream(optionsOrPrompt) {
-
+        logger.debug('[AzureOpenAIProvider.generateTextStream] Starting text streaming');
        // Parse parameters with backward compatibility
        const options = typeof optionsOrPrompt === 'string'
            ? { prompt: optionsOrPrompt }
            : optionsOrPrompt;
        const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = 'You are a helpful AI assistant.' } = options;
-
+        logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
        const messages = [];
        if (systemPrompt) {
            messages.push({
@@ -249,7 +250,7 @@
                }
            }
            catch (parseError) {
-
+                logger.warn('[AzureOpenAIProvider.generateTextStream] Failed to parse chunk:', parseError);
                continue;
            }
        }
@@ -259,15 +260,15 @@
        finally {
            reader.releaseLock();
        }
-
+        logger.debug('[AzureOpenAIProvider.generateTextStream] Streaming completed');
        }
        catch (error) {
-
+            logger.error('[AzureOpenAIProvider.generateTextStream] Error:', error);
            throw error;
        }
    }
    async testConnection() {
-
+        logger.debug('[AzureOpenAIProvider.testConnection] Testing connection to Azure OpenAI');
        const startTime = Date.now();
        try {
            await this.generateText({
@@ -275,7 +276,7 @@
                maxTokens: 5
            });
            const responseTime = Date.now() - startTime;
-
+            logger.debug(`[AzureOpenAIProvider.testConnection] Connection test successful (${responseTime}ms)`);
            return {
                success: true,
                responseTime
@@ -283,7 +284,7 @@
        }
        catch (error) {
            const responseTime = Date.now() - startTime;
-
+            logger.error(`[AzureOpenAIProvider.testConnection] Connection test failed (${responseTime}ms):`, error);
            return {
                success: false,
                error: error instanceof Error ? error.message : 'Unknown error',
```
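Taken together, these hunks swap bare console calls for `NEUROLINK_DEBUG`-gated logger calls throughout the Azure provider. A hedged sketch of how the new output surfaces at runtime — the deep-import path is an assumption based on the dist layout in the file list, and the endpoint/deployment environment variables are not visible in this diff:

```js
// Hypothetical usage sketch; import path assumed from the package's dist layout.
process.env.NEUROLINK_DEBUG = 'true';        // gates the new logger.debug output
process.env.AZURE_OPENAI_API_KEY = '<key>';  // read by getApiKey()
// Endpoint and deployment are resolved via getEndpoint()/getDeploymentId();
// the variables they read are not shown in this diff.

const { AzureOpenAIProvider } = await import('@juspay/neurolink/dist/providers/azureOpenAI.js');
const provider = new AzureOpenAIProvider();
// -> "[AzureOpenAIProvider] Initialized with endpoint: ..., deployment: ..."

const { content } = await provider.generateText({
    prompt: 'Say hello.',
    temperature: 0.7, // defaults visible in the destructuring above
    maxTokens: 500,
});
console.log(content);
```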
package/dist/providers/googleAIStudio.js
CHANGED

```diff
@@ -1,5 +1,6 @@
 import { createGoogleGenerativeAI } from '@ai-sdk/google';
 import { streamText, generateText, Output } from 'ai';
+import { logger } from '../utils/logger.js';
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: 'You are a helpful AI assistant.'
@@ -43,17 +44,17 @@ export class GoogleAIStudio {
         const functionTag = 'GoogleAIStudio.constructor';
         this.modelName = modelName || getGoogleAIModelId();
         try {
-
+            logger.debug(`[${functionTag}] Initialization started`, {
                 modelName: this.modelName,
                 hasApiKey: hasValidAuth()
             });
-
+            logger.debug(`[${functionTag}] Initialization completed`, {
                 modelName: this.modelName,
                 success: true
             });
         }
         catch (err) {
-
+            logger.error(`[${functionTag}] Initialization failed`, {
                 message: 'Error in initializing Google AI Studio',
                 modelName: this.modelName,
                 error: err instanceof Error ? err.message : String(err),
@@ -66,7 +67,7 @@
      * @private
      */
     getModel() {
-
+        logger.debug('GoogleAIStudio.getModel - Google AI model selected', {
            modelName: this.modelName
        });
        const google = getGoogleInstance();
@@ -90,7 +91,7 @@
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
-
+        logger.debug(`[${functionTag}] Stream request started`, {
             provider,
             modelName: this.modelName,
             promptLength: prompt.length,
@@ -109,7 +110,7 @@
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
                 const errorStack = error instanceof Error ? error.stack : undefined;
-
+                logger.error(`[${functionTag}] Stream text error`, {
                     provider,
                     modelName: this.modelName,
                     error: errorMessage,
@@ -119,7 +120,7 @@
                 });
             },
             onFinish: (event) => {
-
+                logger.debug(`[${functionTag}] Stream text finished`, {
                     provider,
                     modelName: this.modelName,
                     finishReason: event.finishReason,
@@ -131,7 +132,7 @@
             },
             onChunk: (event) => {
                 chunkCount++;
-
+                logger.debug(`[${functionTag}] Stream text chunk`, {
                     provider,
                     modelName: this.modelName,
                     chunkNumber: chunkCount,
@@ -147,7 +148,7 @@
         return result;
     }
     catch (err) {
-
+        logger.error(`[${functionTag}] Exception`, {
             provider,
             modelName: this.modelName,
             message: 'Error in streaming text',
@@ -174,7 +175,7 @@
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
-
+        logger.debug(`[${functionTag}] Generate request started`, {
             provider,
             modelName: this.modelName,
             promptLength: prompt.length,
@@ -193,7 +194,7 @@
             generateOptions.experimental_output = Output.object({ schema: finalSchema });
         }
         const result = await generateText(generateOptions);
-
+        logger.debug(`[${functionTag}] Generate text completed`, {
             provider,
             modelName: this.modelName,
             usage: result.usage,
@@ -203,7 +204,7 @@
         return result;
     }
     catch (err) {
-
+        logger.error(`[${functionTag}] Exception`, {
             provider,
             modelName: this.modelName,
             message: 'Error in generating text',
```
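The schema branch above (`finalSchema = schema || analysisSchema`, then `Output.object({ schema: finalSchema })`) means structured output can be requested per call. A sketch under stated assumptions — that `options.schema` is a Zod schema, as the AI SDK's `Output.object` expects, and that the class exposes a `generateText(options)` surface like the Azure provider above:

```js
import { z } from 'zod';

// Assumed import path, mirroring the dist layout in the file list.
const { GoogleAIStudio } = await import('@juspay/neurolink/dist/providers/googleAIStudio.js');

const sentimentSchema = z.object({
    sentiment: z.enum(['positive', 'neutral', 'negative']),
    confidence: z.number().min(0).max(1),
});

const provider = new GoogleAIStudio(); // modelName falls back to getGoogleAIModelId()
// options.schema flows into finalSchema and is attached as
// experimental_output = Output.object({ schema: finalSchema }).
const result = await provider.generateText({
    prompt: 'Classify the sentiment of: "This release is great."',
    schema: sentimentSchema,
});
```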
package/dist/providers/googleVertexAI.js
CHANGED

```diff
@@ -12,16 +12,17 @@ async function getCreateVertexAnthropic() {
         // Try to import the anthropic module - available in @ai-sdk/google-vertex ^2.2.0+
         const anthropicModule = await import('@ai-sdk/google-vertex/anthropic');
         _createVertexAnthropic = anthropicModule.createVertexAnthropic;
-
+        logger.debug('[GoogleVertexAI] Anthropic module successfully loaded');
         return _createVertexAnthropic;
     }
     catch (error) {
         // Anthropic module not available
-
+        logger.warn('[GoogleVertexAI] Anthropic module not available. Install @ai-sdk/google-vertex ^2.2.0 for Anthropic model support.');
         return null;
     }
 }
 import { streamText, generateText, Output } from 'ai';
+import { logger } from '../utils/logger.js';
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: 'You are a helpful AI assistant.'
@@ -70,7 +71,7 @@ const setupGoogleAuth = async () => {
     // Method 2: Service Account Key (JSON string) - Create temporary file
     if (hasServiceAccountKeyAuth() && !hasPrincipalAccountAuth()) {
         const serviceAccountKey = getGoogleServiceAccountKey();
-
+        logger.debug(`[${functionTag}] Service account key auth (JSON string)`, {
             hasServiceAccountKey: !!serviceAccountKey,
             authMethod: 'service_account_key'
         });
@@ -84,13 +85,13 @@
             const tempFile = join(tmpdir(), `gcp-credentials-${Date.now()}.json`);
             writeFileSync(tempFile, serviceAccountKey);
             process.env.GOOGLE_APPLICATION_CREDENTIALS = tempFile;
-
+            logger.debug(`[${functionTag}] Created temporary credentials file`, {
                 tempFile: '[CREATED]',
                 authMethod: 'service_account_key_temp_file'
             });
         }
         catch (error) {
-
+            logger.error(`[${functionTag}] Failed to parse service account key`, {
                 error: error instanceof Error ? error.message : String(error)
             });
             throw new Error('Invalid GOOGLE_SERVICE_ACCOUNT_KEY format. Must be valid JSON.');
@@ -100,7 +101,7 @@
     if (hasServiceAccountEnvAuth() && !hasPrincipalAccountAuth() && !hasServiceAccountKeyAuth()) {
         const clientEmail = getGoogleClientEmail();
         const privateKey = getGooglePrivateKey();
-
+        logger.debug(`[${functionTag}] Service account env auth (separate variables)`, {
             hasClientEmail: !!clientEmail,
             hasPrivateKey: !!privateKey,
             authMethod: 'service_account_env'
@@ -122,13 +123,13 @@
             const tempFile = join(tmpdir(), `gcp-credentials-env-${Date.now()}.json`);
             writeFileSync(tempFile, JSON.stringify(serviceAccount, null, 2));
             process.env.GOOGLE_APPLICATION_CREDENTIALS = tempFile;
-
+            logger.debug(`[${functionTag}] Created temporary credentials file from env vars`, {
                 tempFile: '[CREATED]',
                 authMethod: 'service_account_env_temp_file'
             });
         }
         catch (error) {
-
+            logger.error(`[${functionTag}] Failed to create service account file from env vars`, {
                 error: error instanceof Error ? error.message : String(error)
             });
             throw new Error('Failed to create temporary service account file from environment variables.');
@@ -147,7 +148,7 @@ const createVertexSettings = async () => {
     // Method 1: Principal Account Authentication (file path) - Recommended for production
     if (hasPrincipalAccountAuth()) {
         const credentialsPath = getGoogleApplicationCredentials();
-
+        logger.debug(`[${functionTag}] Principal account auth (file path)`, {
             credentialsPath: credentialsPath ? '[PROVIDED]' : '[NOT_PROVIDED]',
             authMethod: 'principal_account_file'
         });
@@ -155,14 +156,14 @@
     }
     // Method 2 & 3: Other methods now set GOOGLE_APPLICATION_CREDENTIALS in setupGoogleAuth()
     if (hasServiceAccountKeyAuth() || hasServiceAccountEnvAuth()) {
-
+        logger.debug(`[${functionTag}] Alternative auth method configured`, {
             authMethod: hasServiceAccountKeyAuth() ? 'service_account_key' : 'service_account_env',
             credentialsSet: !!process.env.GOOGLE_APPLICATION_CREDENTIALS
         });
         return baseSettings;
     }
     // No valid authentication found
-
+    logger.error(`[${functionTag}] No valid authentication method found`, {
         authMethod: 'none',
         hasPrincipalAccount: hasPrincipalAccountAuth(),
         hasServiceAccountKey: hasServiceAccountKeyAuth(),
@@ -203,29 +204,29 @@ export class GoogleVertexAI {
         const functionTag = 'GoogleVertexAI.constructor';
         this.modelName = modelName || getVertexModelId();
         try {
-
+            logger.debug(`[${functionTag}] Initialization started`, {
                 modelName: this.modelName,
                 isAnthropic: isAnthropicModel(this.modelName)
             });
             const hasPrincipal = hasPrincipalAccountAuth();
-
+            logger.debug(`[${functionTag}] Authentication validation`, {
                 hasPrincipalAccountAuth: hasPrincipal,
                 projectId: getGCPVertexBreezeProjectId() || 'MISSING',
                 location: getGCPVertexBreezeLocation() || 'MISSING'
             });
             if (hasPrincipal) {
-
+                logger.debug(`[${functionTag}] Auth method selected`, {
                     authMethod: 'principal_account',
                     hasGoogleApplicationCredentials: !!getGoogleApplicationCredentials()
                 });
             }
             else {
-
+                logger.warn(`[${functionTag}] Auth method missing`, {
                     authMethod: 'none',
                     hasPrincipalAccountAuth: hasPrincipal
                 });
             }
-
+            logger.debug(`[${functionTag}] Initialization completed`, {
                 modelName: this.modelName,
                 isAnthropic: isAnthropicModel(this.modelName),
                 authMethod: hasPrincipalAccountAuth() ? 'principal_account' : 'none',
@@ -233,7 +234,7 @@
             });
         }
         catch (err) {
-
+            logger.error(`[${functionTag}] Initialization failed`, {
                 message: 'Error in initializing Google Vertex AI',
                 modelName: this.modelName,
                 isAnthropic: isAnthropicModel(this.modelName),
@@ -248,7 +249,7 @@
      */
     async getModel() {
         if (isAnthropicModel(this.modelName)) {
-
+            logger.debug('GoogleVertexAI.getModel - Anthropic model selected', {
                 modelName: this.modelName
             });
             const createVertexAnthropic = await getCreateVertexAnthropic();
@@ -281,7 +282,7 @@
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
-
+        logger.debug(`[${functionTag}] Stream request started`, {
             provider,
             modelName: this.modelName,
             isAnthropic: isAnthropicModel(this.modelName),
@@ -301,7 +302,7 @@
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
                 const errorStack = error instanceof Error ? error.stack : undefined;
-
+                logger.error(`[${functionTag}] Stream text error`, {
                     provider,
                     modelName: this.modelName,
                     error: errorMessage,
@@ -311,7 +312,7 @@
                 });
             },
             onFinish: (event) => {
-
+                logger.debug(`[${functionTag}] Stream text finished`, {
                     provider,
                     modelName: this.modelName,
                     finishReason: event.finishReason,
@@ -323,7 +324,7 @@
             },
             onChunk: (event) => {
                 chunkCount++;
-
+                logger.debug(`[${functionTag}] Stream text chunk`, {
                     provider,
                     modelName: this.modelName,
                     chunkNumber: chunkCount,
@@ -339,7 +340,7 @@
         return result;
     }
     catch (err) {
-
+        logger.error(`[${functionTag}] Exception`, {
             provider,
             modelName: this.modelName,
             message: 'Error in streaming text',
@@ -366,7 +367,7 @@
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
-
+        logger.debug(`[${functionTag}] Generate request started`, {
             provider,
             modelName: this.modelName,
             isAnthropic: isAnthropicModel(this.modelName),
@@ -386,7 +387,7 @@
             generateOptions.experimental_output = Output.object({ schema: finalSchema });
         }
         const result = await generateText(generateOptions);
-
+        logger.debug(`[${functionTag}] Generate text completed`, {
             provider,
             modelName: this.modelName,
             usage: result.usage,
@@ -396,7 +397,7 @@
         return result;
     }
     catch (err) {
-
+        logger.error(`[${functionTag}] Exception`, {
             provider,
             modelName: this.modelName,
             message: 'Error in generating text',
```
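The `setupGoogleAuth()` changes above normalize three credential sources down to a `GOOGLE_APPLICATION_CREDENTIALS` file path. A minimal configuration sketch covering the two variables actually named in the diff (the separate client-email/private-key variables are read through `getGoogleClientEmail()`/`getGooglePrivateKey()` and their names are not shown):

```js
// Method 1: principal account - point at an existing credentials file (recommended).
process.env.GOOGLE_APPLICATION_CREDENTIALS = '/path/to/service-account.json';

// Method 2: the full service-account JSON as a string. setupGoogleAuth()
// writes it to a temp file under tmpdir() and repoints
// GOOGLE_APPLICATION_CREDENTIALS at that file; invalid JSON throws
// "Invalid GOOGLE_SERVICE_ACCOUNT_KEY format. Must be valid JSON."
process.env.GOOGLE_SERVICE_ACCOUNT_KEY = JSON.stringify({
    type: 'service_account',
    // ...remaining service-account fields
});
```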
package/dist/providers/openAI.js
CHANGED

```diff
@@ -1,5 +1,6 @@
 import { openai } from '@ai-sdk/openai';
 import { streamText, generateText, Output } from 'ai';
+import { logger } from '../utils/logger.js';
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: 'You are a helpful AI assistant.'
@@ -23,17 +24,17 @@ export class OpenAI {
         const functionTag = 'OpenAI.constructor';
         this.modelName = modelName || getOpenAIModel();
         try {
-
+            logger.debug(`[${functionTag}] Function called`, { modelName: this.modelName });
             // Set OpenAI API key as environment variable
             process.env.OPENAI_API_KEY = getOpenAIApiKey();
             this.model = openai(this.modelName);
-
+            logger.debug(`[${functionTag}] Function result`, {
                 modelName: this.modelName,
                 success: true
             });
         }
         catch (err) {
-
+            logger.debug(`[${functionTag}] Exception`, {
                 message: 'Error in initializing OpenAI',
                 modelName: this.modelName,
                 err: String(err)
@@ -53,7 +54,7 @@
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
-
+        logger.debug(`[${functionTag}] Stream text started`, {
             provider,
             modelName: this.modelName,
             promptLength: prompt.length,
@@ -70,7 +71,7 @@
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
                 const errorStack = error instanceof Error ? error.stack : undefined;
-
+                logger.debug(`[${functionTag}] Stream text error`, {
                     provider,
                     modelName: this.modelName,
                     error: errorMessage,
@@ -80,7 +81,7 @@
                 });
             },
             onFinish: (event) => {
-
+                logger.debug(`[${functionTag}] Stream text finished`, {
                     provider,
                     modelName: this.modelName,
                     finishReason: event.finishReason,
@@ -92,7 +93,7 @@
             },
             onChunk: (event) => {
                 chunkCount++;
-
+                logger.debug(`[${functionTag}] Stream text chunk`, {
                     provider,
                     modelName: this.modelName,
                     chunkNumber: chunkCount,
@@ -108,7 +109,7 @@
         return result;
     }
     catch (err) {
-
+        logger.debug(`[${functionTag}] Exception`, {
             provider,
             modelName: this.modelName,
             message: 'Error in streaming text',
@@ -128,7 +129,7 @@
         const { prompt, temperature = 0.7, maxTokens = 500, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
-
+        logger.debug(`[${functionTag}] Generate text started`, {
             provider,
             modelName: this.modelName,
             promptLength: prompt.length,
@@ -146,7 +147,7 @@
             generateOptions.experimental_output = Output.object({ schema: finalSchema });
         }
         const result = await generateText(generateOptions);
-
+        logger.debug(`[${functionTag}] Generate text completed`, {
             provider,
             modelName: this.modelName,
             usage: result.usage,
@@ -156,7 +157,7 @@
         return result;
     }
     catch (err) {
-
+        logger.debug(`[${functionTag}] Exception`, {
             provider,
             modelName: this.modelName,
             message: 'Error in generating text',
```
package/dist/utils/logger.d.ts
ADDED

```diff
@@ -0,0 +1,12 @@
+/**
+ * NeuroLink Logger Utility
+ *
+ * Provides conditional logging based on NEUROLINK_DEBUG environment variable
+ */
+export declare const logger: {
+    debug: (...args: any[]) => void;
+    info: (...args: any[]) => void;
+    warn: (...args: any[]) => void;
+    error: (...args: any[]) => void;
+    always: (...args: any[]) => void;
+};
```
package/dist/utils/logger.js
ADDED

```diff
@@ -0,0 +1,25 @@
+/**
+ * NeuroLink Logger Utility
+ *
+ * Provides conditional logging based on NEUROLINK_DEBUG environment variable
+ */
+export const logger = {
+    debug: (...args) => {
+        if (process.env.NEUROLINK_DEBUG === 'true') {
+            console.log(...args);
+        }
+    },
+    info: (...args) => {
+        // Completely disabled for clean CLI demo output
+    },
+    warn: (...args) => {
+        // Completely disabled for clean CLI demo output
+    },
+    error: (...args) => {
+        // Always show errors regardless of debug mode
+        console.error(...args);
+    },
+    always: (...args) => {
+        console.log(...args);
+    }
+};
```
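As implemented above, `debug` prints only while `NEUROLINK_DEBUG` is `'true'` (the gate is re-checked on every call), `info` and `warn` are no-ops — so the `logger.warn` calls added in the provider files produce no output — `error` always writes via `console.error`, and `always` always writes via `console.log`. A short usage sketch (import path assumed from the dist layout):

```js
import { logger } from '@juspay/neurolink/dist/utils/logger.js'; // assumed path

logger.debug('hidden unless NEUROLINK_DEBUG === "true"');
logger.info('silently dropped');   // disabled for clean CLI output
logger.warn('silently dropped');   // also disabled
logger.error('always printed');    // unconditional console.error
logger.always('always printed');   // unconditional console.log

process.env.NEUROLINK_DEBUG = 'true';
logger.debug('now visible');       // the env check happens at call time
```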
package/dist/utils/providerUtils.js
CHANGED

```diff
@@ -1,6 +1,7 @@
 /**
  * Utility functions for AI provider management
  */
+import { logger } from './logger.js';
 /**
  * Get the best available provider based on preferences and availability
  * @param requestedProvider - Optional preferred provider name
@@ -16,12 +17,12 @@ export function getBestProvider(requestedProvider) {
     // Check which providers have their required environment variables
     for (const provider of providers) {
         if (isProviderConfigured(provider)) {
-
+            logger.debug(`[getBestProvider] Selected provider: ${provider}`);
             return provider;
         }
     }
     // Default to bedrock if nothing is configured
-
+    logger.warn('[getBestProvider] No providers configured, defaulting to bedrock');
     return 'bedrock';
 }
 /**
```
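Provider selection now reports through the same logger. An illustrative call (import path assumed); note that when nothing is configured the `logger.warn` branch is silent at runtime, since `warn` is a no-op in the logger above:

```js
import { getBestProvider } from '@juspay/neurolink/dist/utils/providerUtils.js'; // assumed path

process.env.NEUROLINK_DEBUG = 'true';
const provider = getBestProvider();
// With credentials configured: "[getBestProvider] Selected provider: <name>"
// With none configured: returns 'bedrock' (the warn call emits nothing).
console.log(provider);
```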
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@juspay/neurolink",
-  "version": "1.5.1",
+  "version": "1.5.2",
   "description": "Universal AI Development Platform with external MCP server integration, multi-provider support, and professional CLI. Connect to 65+ MCP servers for filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with OpenAI, Anthropic, Google Vertex AI, and AWS Bedrock.",
   "author": {
     "name": "Juspay Technologies",
```