@juspay/neurolink 5.2.0 → 5.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +8 -2
  2. package/dist/cli/factories/command-factory.js +6 -5
  3. package/dist/core/base-provider.js +24 -13
  4. package/dist/core/constants.d.ts +1 -0
  5. package/dist/core/constants.js +1 -0
  6. package/dist/lib/core/base-provider.js +24 -13
  7. package/dist/lib/core/constants.d.ts +1 -0
  8. package/dist/lib/core/constants.js +1 -0
  9. package/dist/lib/mcp/client.d.ts +1 -0
  10. package/dist/lib/mcp/client.js +1 -0
  11. package/dist/lib/mcp/context-manager.d.ts +1 -0
  12. package/dist/lib/mcp/context-manager.js +8 -4
  13. package/dist/lib/mcp/function-calling.d.ts +13 -0
  14. package/dist/lib/mcp/function-calling.js +133 -34
  15. package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
  16. package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
  17. package/dist/lib/providers/function-calling-provider.d.ts +64 -2
  18. package/dist/lib/providers/function-calling-provider.js +208 -9
  19. package/dist/lib/providers/mcp-provider.js +20 -5
  20. package/dist/lib/services/streaming/streaming-manager.js +11 -10
  21. package/dist/lib/services/websocket/websocket-server.js +12 -11
  22. package/dist/lib/telemetry/telemetry-service.js +8 -7
  23. package/dist/mcp/client.d.ts +1 -0
  24. package/dist/mcp/client.js +1 -0
  25. package/dist/mcp/context-manager.d.ts +1 -0
  26. package/dist/mcp/context-manager.js +8 -4
  27. package/dist/mcp/function-calling.d.ts +13 -0
  28. package/dist/mcp/function-calling.js +133 -34
  29. package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
  30. package/dist/mcp/neurolink-mcp-client.js +21 -5
  31. package/dist/providers/function-calling-provider.d.ts +64 -2
  32. package/dist/providers/function-calling-provider.js +208 -9
  33. package/dist/providers/mcp-provider.js +20 -5
  34. package/dist/services/streaming/streaming-manager.js +11 -10
  35. package/dist/services/websocket/websocket-server.js +12 -11
  36. package/dist/telemetry/telemetry-service.js +8 -7
  37. package/package.json +12 -10
@@ -7,7 +7,7 @@ import { streamText as aiStreamText, generateText as aiGenerate, Output, } from
  import { getAvailableFunctionTools, executeFunctionCall, isFunctionCallingAvailable, } from "../mcp/function-calling.js";
  import { createExecutionContext } from "../mcp/context-manager.js";
  import { mcpLogger } from "../mcp/logging.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  /**
  * Enhanced provider that enables real function calling with MCP tools
  */
@@ -16,11 +16,22 @@ export class FunctionCallingProvider {
  enableFunctionCalling;
  sessionId;
  userId;
+ cachedToolsObject = null;
+ cachedToolMap = null;
+ cacheTimestamp = null;
+ cacheExpiryMs;
  constructor(baseProvider, options = {}) {
  this.baseProvider = baseProvider;
  this.enableFunctionCalling = options.enableFunctionCalling ?? true;
  this.sessionId = options.sessionId || `function-calling-${Date.now()}`;
  this.userId = options.userId || "function-calling-user";
+ // Configurable cache expiry: default 5 minutes, with environment override, then constructor option
+ const defaultExpiryMs = 5 * 60 * 1000; // 5 minutes
+ const envExpiryMs = process.env.NEUROLINK_CACHE_EXPIRY_MS
+ ? parseInt(process.env.NEUROLINK_CACHE_EXPIRY_MS, 10)
+ : undefined;
+ this.cacheExpiryMs =
+ options.cacheExpiryMs ?? envExpiryMs ?? defaultExpiryMs;
  }
  /**
  * PRIMARY METHOD: Stream content using AI (recommended for new code)
@@ -91,8 +102,30 @@ export class FunctionCallingProvider {
  }
  return result;
  }
- // Get available function tools
- const { tools, toolMap } = await getAvailableFunctionTools();
+ // Get available function tools (with automatic cache invalidation)
+ let toolsObject, toolMap;
+ const now = Date.now();
+ const isCacheExpired = this.cacheTimestamp === null ||
+ now - this.cacheTimestamp > this.cacheExpiryMs;
+ if (this.cachedToolsObject && this.cachedToolMap && !isCacheExpired) {
+ toolsObject = this.cachedToolsObject;
+ toolMap = this.cachedToolMap;
+ mcpLogger.debug(`[${functionTag}] Using cached tools (${Math.round((now - this.cacheTimestamp) / 1000)}s old)`);
+ }
+ else {
+ if (isCacheExpired && this.cachedToolsObject) {
+ mcpLogger.debug(`[${functionTag}] Cache expired, refreshing tools`);
+ }
+ const result = await getAvailableFunctionTools();
+ toolsObject = result.toolsObject;
+ toolMap = result.toolMap;
+ // Cache the results for future use with timestamp
+ this.cachedToolsObject = toolsObject;
+ this.cachedToolMap = toolMap;
+ this.cacheTimestamp = now;
+ mcpLogger.debug(`[${functionTag}] Cached ${Object.keys(toolsObject).length} tools with expiry in ${this.cacheExpiryMs / 1000}s`);
+ }
+ const tools = Object.values(toolsObject);
  if (tools.length === 0) {
  mcpLogger.debug(`[${functionTag}] No tools available, using base provider`);
  const result = await this.baseProvider.generate(options, analysisSchema);
@@ -113,8 +146,8 @@ export class FunctionCallingProvider {
  aiProvider: this.baseProvider.constructor.name,
  });
  // Use the AI SDK's native function calling by calling generate directly
- // We need to get the underlying model from the base provider
- const result = await this.generateWithTools(options, tools, toolMap, context, analysisSchema);
+ // We can now use the toolsObject directly instead of converting from array
+ const result = await this.generateWithToolsObject(options, toolsObject, toolMap, context, analysisSchema);
  if (!result) {
  return {
  content: "No response generated",
@@ -150,14 +183,83 @@ export class FunctionCallingProvider {
  }
  }
  /**
- * Generate text using AI SDK's native function calling
+ * Generate text with tools using the AI SDK's generate function (with tools object)
+ */
+ async generateWithToolsObject(options, toolsObject, toolMap, context, analysisSchema) {
+ const functionTag = "FunctionCallingProvider.generateWithToolsObject";
+ try {
+ // Use the toolsObject directly with proper execution wrapped
+ const toolsWithExecution = this.wrapToolsWithExecution(toolsObject, toolMap, context);
+ mcpLogger.debug(`[${functionTag}] Using tools object with ${Object.keys(toolsWithExecution).length} tools`);
+ // Get the model from base provider
+ const modelInfo = await this.getModelFromProvider();
+ if (!modelInfo) {
+ mcpLogger.warn(`[${functionTag}] Could not get model from provider, falling back to base provider`);
+ const result = await this.baseProvider.generate(options, analysisSchema);
+ if (!result) {
+ return {
+ content: "No response generated",
+ provider: "function-calling",
+ model: "unknown",
+ };
+ }
+ return result;
+ }
+ // Use AI SDK's generate directly with tools
+ const generateOptions = {
+ model: modelInfo.model,
+ prompt: options.prompt,
+ system: options.systemPrompt || "You are a helpful AI assistant.",
+ temperature: options.temperature || 0.7,
+ maxTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
+ tools: toolsWithExecution,
+ toolChoice: "auto", // Let the AI decide when to use tools
+ maxSteps: options.maxSteps ?? DEFAULT_MAX_STEPS, // Enable multi-turn tool execution
+ };
+ // Add experimental_output if schema is provided
+ if (analysisSchema) {
+ generateOptions.experimental_output = Output.object({
+ schema: analysisSchema,
+ });
+ }
+ const result = await aiGenerate(generateOptions);
+ mcpLogger.debug(`[${functionTag}] AI SDK generate completed`, {
+ toolCalls: result.toolCalls?.length || 0,
+ finishReason: result.finishReason,
+ usage: result.usage,
+ });
+ return {
+ content: result.text,
+ provider: "function-calling",
+ model: "unknown",
+ usage: result.usage
+ ? {
+ inputTokens: result.usage.promptTokens,
+ outputTokens: result.usage.completionTokens,
+ totalTokens: result.usage.totalTokens,
+ }
+ : undefined,
+ responseTime: 0,
+ toolsUsed: result.toolCalls?.map((tc) => tc.toolName) || [],
+ toolExecutions: [],
+ enhancedWithTools: (result.toolCalls?.length || 0) > 0,
+ availableTools: [],
+ };
+ }
+ catch (error) {
+ mcpLogger.error(`[${functionTag}] Failed to generate text with tools:`, error);
+ throw error;
+ }
+ }
+ /**
+ * Generate text using AI SDK's native function calling (legacy array-based)
  */
  async generateWithTools(options, tools, toolMap, context, analysisSchema) {
  const functionTag = "FunctionCallingProvider.generateWithTools";
  try {
  // Convert our tools to AI SDK format with proper execution
  const toolsWithExecution = this.convertToAISDKTools(tools, toolMap, context);
- mcpLogger.debug(`[${functionTag}] Calling AI SDK generate with ${Object.keys(toolsWithExecution).length} tools and maxSteps: 5`);
+ mcpLogger.debug(`[${functionTag}] Calling AI SDK generate with ${Object.keys(toolsWithExecution).length} tools and maxSteps: ${options.maxSteps ?? DEFAULT_MAX_STEPS}`);
  mcpLogger.debug(`[${functionTag}] Sanitized tool names:`, Object.keys(toolsWithExecution));
  // Log the first few tools to debug the issue
  const toolNames = Object.keys(toolsWithExecution);
@@ -187,7 +289,7 @@ export class FunctionCallingProvider {
  maxTokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
  tools: toolsWithExecution,
  toolChoice: "auto", // Let the AI decide when to use tools
- maxSteps: 5, // CRITICAL: Enable multi-turn tool execution
+ maxSteps: options.maxSteps ?? DEFAULT_MAX_STEPS, // Enable multi-turn tool execution
  };
  // Add experimental_output if schema is provided
  if (analysisSchema) {
@@ -270,7 +372,52 @@ export class FunctionCallingProvider {
  return sanitized;
  }
  /**
- * Convert our tools to AI SDK format with proper execution
+ * Wrap tools with proper execution context (for object-based tools)
+ */
+ wrapToolsWithExecution(toolsObject, toolMap, context) {
+ const functionTag = "FunctionCallingProvider.wrapToolsWithExecution";
+ const wrappedTools = {};
+ for (const [toolName, tool] of Object.entries(toolsObject)) {
+ const toolInfo = toolMap.get(toolName);
+ const originalToolName = toolInfo ? toolInfo.toolName : toolName;
+ // Create a version with actual MCP execution
+ wrappedTools[toolName] = {
+ description: tool.description,
+ parameters: tool.parameters,
+ execute: async (args) => {
+ // Debug logging only in debug mode
+ if (process.env.NEUROLINK_DEBUG === "true") {
+ const providerName = this.baseProvider.constructor.name;
+ mcpLogger.debug(`Tool execution - Provider: ${providerName}`);
+ mcpLogger.debug(`Tool: ${toolName} (original: ${originalToolName})`);
+ mcpLogger.debug(`Args:`, args);
+ }
+ try {
+ // Execute the actual MCP tool
+ const result = await executeFunctionCall(toolName, args);
+ if (process.env.NEUROLINK_DEBUG === "true") {
+ mcpLogger.debug(`Tool result:`, result);
+ }
+ if (result.success) {
+ return result.data || { success: true };
+ }
+ else {
+ return { error: result.error || "Tool execution failed" };
+ }
+ }
+ catch (error) {
+ mcpLogger.error(`[${functionTag}] Tool execution error: ${toolName}`, error);
+ return {
+ error: error instanceof Error ? error.message : String(error),
+ };
+ }
+ },
+ };
+ }
+ return wrappedTools;
+ }
+ /**
+ * Convert our tools to AI SDK format with proper execution (legacy array-based)
  */
  convertToAISDKTools(tools, toolMap, context) {
  const functionTag = "FunctionCallingProvider.convertToAISDKTools";
@@ -394,6 +541,57 @@ These functions provide accurate, real-time data. Use them actively to enhance y
  /**
  * Alias for generate() - CLI-SDK consistency
  */
+ /**
+ * Clear cached tools - Cache Invalidation Strategy
+ *
+ * WHEN TO CALL clearToolsCache():
+ *
+ * 1. **MCP Server Changes**: When MCP servers are added, removed, or restarted
+ * - After calling unifiedRegistry.addServer() or removeServer()
+ * - When MCP server configurations change
+ * - After MCP server restart or reconnection
+ *
+ * 2. **Tool Registration Changes**: When custom tools are modified
+ * - After registering new SDK tools via registerTool()
+ * - When tool implementations change
+ * - After unregistering tools
+ *
+ * 3. **Provider Reinitialization**: When the provider context changes
+ * - Before switching between different AI providers
+ * - When session context changes significantly
+ * - After provider authentication refresh
+ *
+ * 4. **Error Recovery**: When tool execution encounters systematic failures
+ * - After MCP connection errors are resolved
+ * - When tool discovery needs to be re-run
+ * - During error recovery workflows
+ *
+ * 5. **Development/Testing**: During development and testing cycles
+ * - Between test cases that modify tool availability
+ * - When testing different tool configurations
+ * - During hot reloading scenarios
+ *
+ * CACHE LIFECYCLE:
+ * - Cache is populated on first generate() call via getAvailableFunctionTools()
+ * - Cache persists across multiple generate() calls for performance
+ * - Cache is invalidated by calling this method
+ * - Next generate() call will rebuild cache from current tool state
+ *
+ * PERFORMANCE IMPACT:
+ * - Clearing cache forces tool discovery on next usage (~100-500ms overhead)
+ * - Recommended to clear cache proactively rather than reactively
+ * - Consider batching tool changes before clearing cache
+ *
+ * THREAD SAFETY:
+ * - This method is not thread-safe
+ * - Avoid calling during active generate() operations
+ * - Safe to call between separate AI generation requests
+ */
+ clearToolsCache() {
+ this.cachedToolsObject = null;
+ this.cachedToolMap = null;
+ this.cacheTimestamp = null;
+ }
  /**
  * Short alias for generate() - CLI-SDK consistency
  */
@@ -422,6 +620,7 @@ export function createMCPAwareProviderV3(baseProvider, options = {}) {
  enableFunctionCalling: options.enableFunctionCalling,
  sessionId: options.sessionId,
  userId: options.userId,
+ cacheExpiryMs: options.cacheExpiryMs,
  });
  mcpLogger.debug(`[${functionTag}] Created MCP-aware provider with function calling`, {
  providerName: options.providerName,
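Taken together, the hunks above add a time-based tool cache to FunctionCallingProvider (expiry resolved from the cacheExpiryMs option, then the NEUROLINK_CACHE_EXPIRY_MS environment variable, then a 5-minute default), make maxSteps configurable via options.maxSteps with a DEFAULT_MAX_STEPS fallback, and expose clearToolsCache() for explicit invalidation. A minimal consumer sketch, assuming the factory is reachable from the package root and that baseProvider is an already-configured NeuroLink provider instance:

// Illustrative usage only; the import path and baseProvider wiring are assumptions.
import { createMCPAwareProviderV3 } from "@juspay/neurolink";

const provider = createMCPAwareProviderV3(baseProvider, {
  enableFunctionCalling: true,
  cacheExpiryMs: 60_000, // override the 5-minute default (or set NEUROLINK_CACHE_EXPIRY_MS)
});

// Tool discovery runs on the first generate() call and the result is reused until the cache expires.
const result = await provider.generate({ prompt: "Summarize today's alerts", maxSteps: 3 });

// After MCP servers or registered tools change, drop the cache so the next
// generate() call re-runs getAvailableFunctionTools().
provider.clearToolsCache();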
@@ -62,11 +62,26 @@ export class MCPAwareProvider {
  exists: async () => false,
  },
  path: {
- join: (...paths) => require("path").join(...paths),
- resolve: (...paths) => require("path").resolve(...paths),
- relative: (from, to) => require("path").relative(from, to),
- dirname: (path) => require("path").dirname(path),
- basename: (path, ext) => require("path").basename(path, ext),
+ join: (...paths) => {
+ const pathModule = require("path");
+ return pathModule.join(...paths);
+ },
+ resolve: (...paths) => {
+ const pathModule = require("path");
+ return pathModule.resolve(...paths);
+ },
+ relative: (from, to) => {
+ const pathModule = require("path");
+ return pathModule.relative(from, to);
+ },
+ dirname: (pathArg) => {
+ const pathModule = require("path");
+ return pathModule.dirname(pathArg);
+ },
+ basename: (pathArg, ext) => {
+ const pathModule = require("path");
+ return pathModule.basename(pathArg, ext);
+ },
  },
  grantedPermissions: [],
  log: console.log,
@@ -1,5 +1,6 @@
  import { EventEmitter } from "events";
  import { randomUUID } from "crypto";
+ import { logger } from "../../utils/logger.js";
  export class StreamingManager extends EventEmitter {
  activeSessions = new Map();
  streamingPools = new Map();
@@ -40,7 +41,7 @@ export class StreamingManager extends EventEmitter {
  };
  this.activeSessions.set(sessionId, session);
  this.updateGlobalMetrics();
- console.log(`[Streaming Manager] Created session ${sessionId} for provider ${config.provider}`);
+ logger.debug(`[Streaming Manager] Created session ${sessionId} for provider ${config.provider}`);
  this.emit("session-created", session);
  return session;
  }
@@ -52,7 +53,7 @@ export class StreamingManager extends EventEmitter {
  session.status = "terminated";
  this.activeSessions.delete(sessionId);
  this.updateGlobalMetrics();
- console.log(`[Streaming Manager] Terminated session ${sessionId}`);
+ logger.debug(`[Streaming Manager] Terminated session ${sessionId}`);
  this.emit("session-terminated", session);
  }
  async pauseStreamingSession(sessionId) {
@@ -62,7 +63,7 @@ export class StreamingManager extends EventEmitter {
  }
  if (session.status === "active") {
  session.status = "paused";
- console.log(`[Streaming Manager] Paused session ${sessionId}`);
+ logger.debug(`[Streaming Manager] Paused session ${sessionId}`);
  this.emit("session-paused", session);
  }
  }
@@ -74,7 +75,7 @@ export class StreamingManager extends EventEmitter {
  if (session.status === "paused") {
  session.status = "active";
  session.lastActivity = Date.now();
- console.log(`[Streaming Manager] Resumed session ${sessionId}`);
+ logger.debug(`[Streaming Manager] Resumed session ${sessionId}`);
  this.emit("session-resumed", session);
  }
  }
@@ -97,7 +98,7 @@ export class StreamingManager extends EventEmitter {
  session.config.bufferSize = Math.max(session.config.bufferSize * 0.8, 1024);
  session.config.streamingMode = "real-time";
  }
- console.log(`[Streaming Manager] Optimized session ${sessionId}: latency=${currentLatency}ms, mode=${session.config.streamingMode}`);
+ logger.debug(`[Streaming Manager] Optimized session ${sessionId}: latency=${currentLatency}ms, mode=${session.config.streamingMode}`);
  }
  async enableStreamingCompression(sessionId) {
  const session = this.activeSessions.get(sessionId);
@@ -105,7 +106,7 @@ export class StreamingManager extends EventEmitter {
  return;
  }
  session.config.compressionEnabled = true;
- console.log(`[Streaming Manager] Enabled compression for session ${sessionId}`);
+ logger.debug(`[Streaming Manager] Enabled compression for session ${sessionId}`);
  }
  async configureStreamingBuffering(sessionId, bufferConfig) {
  const session = this.activeSessions.get(sessionId);
@@ -114,7 +115,7 @@ export class StreamingManager extends EventEmitter {
  }
  session.config.bufferSize = bufferConfig.maxSize;
  session.config.maxChunkSize = Math.min(session.config.maxChunkSize, bufferConfig.flushThreshold);
- console.log(`[Streaming Manager] Updated buffer config for session ${sessionId}:`, bufferConfig);
+ logger.debug(`[Streaming Manager] Updated buffer config for session ${sessionId}:`, bufferConfig);
  }
  // Multi-Stream Coordination
  async createStreamingPool(poolId, config) {
@@ -126,7 +127,7 @@ export class StreamingManager extends EventEmitter {
  loadBalancer: config.loadBalancing,
  };
  this.streamingPools.set(poolId, pool);
- console.log(`[Streaming Manager] Created pool ${poolId} with max ${config.maxConcurrentSessions} sessions`);
+ logger.debug(`[Streaming Manager] Created pool ${poolId} with max ${config.maxConcurrentSessions} sessions`);
  }
  async balanceStreamingLoad(poolId) {
  const pool = this.streamingPools.get(poolId);
@@ -160,7 +161,7 @@ export class StreamingManager extends EventEmitter {
  const newMaxSessions = Math.max(1, Math.floor(pool.maxSessions * scale));
  pool.maxSessions = newMaxSessions;
  pool.config.maxConcurrentSessions = newMaxSessions;
- console.log(`[Streaming Manager] Scaled pool ${poolId} to ${newMaxSessions} max sessions (${scale}x)`);
+ logger.debug(`[Streaming Manager] Scaled pool ${poolId} to ${newMaxSessions} max sessions (${scale}x)`);
  }
  // Performance Monitoring
  getStreamingMetrics(sessionId) {
@@ -217,7 +218,7 @@ export class StreamingManager extends EventEmitter {
  this.healthCheckInterval = setInterval(() => {
  const health = this.getStreamingHealthStatus();
  if (health.status !== "healthy") {
- console.warn("[Streaming Manager] Health check:", health);
+ logger.debug("[Streaming Manager] Health check:", health);
  this.emit("health-warning", health);
  }
  }, 30000); // Check every 30 seconds
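The streaming-manager hunks above replace direct console.log/console.warn/console.error calls with a shared logger imported from ../../utils/logger.js, and the websocket-server and telemetry-service hunks below apply the same swap: routine session and connection chatter moves to logger.debug while genuine failures stay on logger.error. The sketch below shows the kind of level-gated logger this implies; it is an illustration only, not the package's actual utils/logger.js, and the NEUROLINK_LOG_LEVEL variable is a hypothetical name:

// Illustrative level-gated logger (assumed shape, not the real utils/logger.js).
const levels = { error: 0, warn: 1, info: 2, debug: 3 };
const active = levels[process.env.NEUROLINK_LOG_LEVEL ?? "info"] ?? 2; // hypothetical env var

export const logger = {
  error: (...args) => console.error(...args), // errors are always surfaced
  warn: (...args) => { if (active >= 1) { console.warn(...args); } },
  info: (...args) => { if (active >= 2) { console.info(...args); } },
  debug: (...args) => { if (active >= 3) { console.debug(...args); } }, // silent by default
};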
@@ -1,6 +1,7 @@
  import { WebSocketServer, WebSocket } from "ws";
  import { EventEmitter } from "events";
  import { randomUUID } from "crypto";
+ import { logger } from "../../utils/logger.js";
  export class NeuroLinkWebSocketServer extends EventEmitter {
  wss;
  connections = new Map();
@@ -32,7 +33,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  this.handleConnection(ws, request);
  });
  this.wss.on("error", (error) => {
- console.error("[WebSocket Server] Error:", error);
+ logger.error("[WebSocket Server] Error:", error);
  this.emit("error", error);
  });
  }
@@ -64,7 +65,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  this.handleDisconnection(connectionId);
  });
  ws.on("error", (error) => {
- console.error(`[WebSocket] Connection ${connectionId} error:`, error);
+ logger.error(`[WebSocket] Connection ${connectionId} error:`, error);
  this.handleDisconnection(connectionId);
  });
  // Send connection confirmation
@@ -82,7 +83,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  },
  },
  });
- console.log(`[WebSocket] New connection: ${connectionId} (${this.connections.size}/${this.options.maxConnections})`);
+ logger.debug(`[WebSocket] New connection: ${connectionId} (${this.connections.size}/${this.options.maxConnections})`);
  this.emit("connection", { connectionId, userAgent, ipAddress });
  }
  handleMessage(connectionId, data) {
@@ -101,7 +102,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  }
  }
  catch (error) {
- console.error(`[WebSocket] Invalid message from ${connectionId}:`, error);
+ logger.error(`[WebSocket] Invalid message from ${connectionId}:`, error);
  this.sendError(connectionId, "Invalid message format");
  }
  }
@@ -124,7 +125,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  // Clean up
  this.connections.delete(connectionId);
  this.connectionInfo.delete(connectionId);
- console.log(`[WebSocket] Disconnected: ${connectionId} (${this.connections.size}/${this.options.maxConnections})`);
+ logger.debug(`[WebSocket] Disconnected: ${connectionId} (${this.connections.size}/${this.options.maxConnections})`);
  this.emit("disconnection", { connectionId });
  }
  // Room Management
@@ -150,7 +151,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  memberCount: this.rooms.get(roomId).size,
  },
  });
- console.log(`[WebSocket] ${connectionId} joined room ${roomId}`);
+ logger.debug(`[WebSocket] ${connectionId} joined room ${roomId}`);
  return true;
  }
  leaveRoom(connectionId, roomId) {
@@ -179,7 +180,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  memberCount: room.size,
  },
  });
- console.log(`[WebSocket] ${connectionId} left room ${roomId}`);
+ logger.debug(`[WebSocket] ${connectionId} left room ${roomId}`);
  return true;
  }
  broadcastToRoom(roomId, message) {
@@ -231,7 +232,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  return true;
  }
  catch (error) {
- console.error(`[WebSocket] Failed to send message to ${connectionId}:`, error);
+ logger.error(`[WebSocket] Failed to send message to ${connectionId}:`, error);
  return false;
  }
  }
@@ -269,7 +270,7 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  const connection = this.connectionInfo.get(connectionId);
  if (connection &&
  Date.now() - connection.lastActivity > this.options.timeoutMs) {
- console.log(`[WebSocket] Timeout for connection ${connectionId}`);
+ logger.debug(`[WebSocket] Timeout for connection ${connectionId}`);
  ws.terminate();
  }
  }
@@ -296,9 +297,9 @@ export class NeuroLinkWebSocketServer extends EventEmitter {
  // Implementation for channel data handling
  }
  handleChannelError(channelId, error) {
- console.error(`[Streaming Channel] ${channelId} error:`, error);
+ logger.error(`[Streaming Channel] ${channelId} error:`, error);
  }
  handleChannelClose(channelId) {
- console.log(`[Streaming Channel] ${channelId} closed`);
+ logger.debug(`[Streaming Channel] ${channelId} closed`);
  }
  }
@@ -6,6 +6,7 @@ import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-http";
  import { Resource } from "@opentelemetry/resources";
  import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION, } from "@opentelemetry/semantic-conventions";
  import { PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
+ import { logger } from "../utils/logger.js";
  export class TelemetryService {
  static instance;
  sdk;
@@ -27,7 +28,7 @@ export class TelemetryService {
  this.initializeTelemetry();
  }
  else {
- console.log("[Telemetry] Disabled - set NEUROLINK_TELEMETRY_ENABLED=true or configure OTEL_EXPORTER_OTLP_ENDPOINT to enable");
+ logger.debug("[Telemetry] Disabled - set NEUROLINK_TELEMETRY_ENABLED=true or configure OTEL_EXPORTER_OTLP_ENDPOINT to enable");
  }
  }
  static getInstance() {
@@ -58,10 +59,10 @@ export class TelemetryService {
  this.meter = metrics.getMeter("neurolink-ai");
  this.tracer = trace.getTracer("neurolink-ai");
  this.initializeMetrics();
- console.log("[Telemetry] Initialized with endpoint:", process.env.OTEL_EXPORTER_OTLP_ENDPOINT);
+ logger.debug("[Telemetry] Initialized with endpoint:", process.env.OTEL_EXPORTER_OTLP_ENDPOINT);
  }
  catch (error) {
- console.error("[Telemetry] Failed to initialize:", error);
+ logger.error("[Telemetry] Failed to initialize:", error);
  this.enabled = false;
  }
  }
@@ -97,10 +98,10 @@ export class TelemetryService {
  }
  try {
  await this.sdk?.start();
- console.log("[Telemetry] SDK started successfully");
+ logger.debug("[Telemetry] SDK started successfully");
  }
  catch (error) {
- console.error("[Telemetry] Failed to start SDK:", error);
+ logger.error("[Telemetry] Failed to start SDK:", error);
  this.enabled = false;
  }
  }
@@ -251,10 +252,10 @@ export class TelemetryService {
  if (this.enabled && this.sdk) {
  try {
  await this.sdk.shutdown();
- console.log("[Telemetry] SDK shutdown completed");
+ logger.debug("[Telemetry] SDK shutdown completed");
  }
  catch (error) {
- console.error("[Telemetry] Error during shutdown:", error);
+ logger.error("[Telemetry] Error during shutdown:", error);
  }
  }
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@juspay/neurolink",
- "version": "5.2.0",
+ "version": "5.3.0",
  "description": "Universal AI Development Platform with working MCP integration, multi-provider support, and professional CLI. Built-in tools operational, 58+ external MCP servers discoverable. Connect to filesystem, GitHub, database operations, and more. Build, test, and deploy AI applications with 9 major providers: OpenAI, Anthropic, Google AI, AWS Bedrock, Azure, Hugging Face, Ollama, and Mistral AI.",
  "author": {
  "name": "Juspay Technologies",
@@ -149,25 +149,22 @@
  "@opentelemetry/sdk-node": "^0.54.2",
  "@opentelemetry/semantic-conventions": "^1.34.0",
  "ai": "^4.0.0",
+ "chalk": "^5.3.0",
  "dotenv": "^16.5.0",
+ "inquirer": "^9.2.15",
+ "mathjs": "^14.5.3",
  "ollama-ai-provider": "^1.2.0",
+ "ora": "^7.0.1",
  "reconnecting-eventsource": "^1.6.4",
  "undici": "^6.6.2",
  "uuid": "^11.1.0",
  "ws": "^8.18.3",
+ "yargs": "^17.7.2",
  "zod": "^3.22.0",
- "zod-to-json-schema": "^3.24.5",
- "chalk": "^5.3.0",
- "inquirer": "^9.2.15",
- "ora": "^7.0.1",
- "yargs": "^17.7.2"
+ "zod-to-json-schema": "^3.24.5"
  },
  "devDependencies": {
  "@changesets/cli": "^2.26.2",
- "@types/ws": "^8.18.1",
- "@vitest/coverage-v8": "^2.1.9",
- "cors": "^2.8.5",
- "express": "^5.1.0",
  "@eslint/js": "^9.0.0",
  "@semantic-release/changelog": "^6.0.3",
  "@semantic-release/commit-analyzer": "^13.0.0",
@@ -183,15 +180,20 @@
  "@types/express": "^5.0.3",
  "@types/inquirer": "^9.0.7",
  "@types/node": "^20.0.0",
+ "@types/ws": "^8.18.1",
  "@types/yargs": "^17.0.33",
  "@typescript-eslint/eslint-plugin": "^8.0.0",
  "@typescript-eslint/parser": "^8.0.0",
+ "@vitest/coverage-v8": "^2.1.9",
+ "cors": "^2.8.5",
  "eslint": "^9.0.0",
+ "express": "^5.1.0",
  "playwright": "^1.52.0",
  "prettier": "^3.0.0",
  "publint": "^0.3.2",
  "puppeteer": "^24.10.0",
  "semantic-release": "^24.0.0",
+ "shell-quote": "^1.8.3",
  "svelte": "^5.0.0",
  "svelte-check": "^4.0.0",
  "tslib": "^2.4.1",