@juspay/neurolink 4.0.0 → 4.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. package/CHANGELOG.md +14 -5
  2. package/README.md +150 -92
  3. package/dist/lib/mcp/dynamic-chain-executor.d.ts +201 -0
  4. package/dist/lib/mcp/dynamic-chain-executor.js +489 -0
  5. package/dist/lib/mcp/dynamic-orchestrator.d.ts +109 -0
  6. package/dist/lib/mcp/dynamic-orchestrator.js +351 -0
  7. package/dist/lib/mcp/error-manager.d.ts +254 -0
  8. package/dist/lib/mcp/error-manager.js +501 -0
  9. package/dist/lib/mcp/error-recovery.d.ts +158 -0
  10. package/dist/lib/mcp/error-recovery.js +405 -0
  11. package/dist/lib/mcp/health-monitor.d.ts +256 -0
  12. package/dist/lib/mcp/health-monitor.js +621 -0
  13. package/dist/lib/mcp/orchestrator.d.ts +136 -5
  14. package/dist/lib/mcp/orchestrator.js +316 -9
  15. package/dist/lib/mcp/registry.d.ts +22 -0
  16. package/dist/lib/mcp/registry.js +24 -0
  17. package/dist/lib/mcp/semaphore-manager.d.ts +137 -0
  18. package/dist/lib/mcp/semaphore-manager.js +329 -0
  19. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  20. package/dist/lib/mcp/session-manager.d.ts +186 -0
  21. package/dist/lib/mcp/session-manager.js +400 -0
  22. package/dist/lib/mcp/session-persistence.d.ts +93 -0
  23. package/dist/lib/mcp/session-persistence.js +298 -0
  24. package/dist/lib/mcp/transport-manager.d.ts +153 -0
  25. package/dist/lib/mcp/transport-manager.js +330 -0
  26. package/dist/lib/mcp/unified-registry.d.ts +42 -1
  27. package/dist/lib/mcp/unified-registry.js +122 -2
  28. package/dist/lib/neurolink.d.ts +75 -0
  29. package/dist/lib/neurolink.js +104 -0
  30. package/dist/mcp/dynamic-chain-executor.d.ts +201 -0
  31. package/dist/mcp/dynamic-chain-executor.js +489 -0
  32. package/dist/mcp/dynamic-orchestrator.d.ts +109 -0
  33. package/dist/mcp/dynamic-orchestrator.js +351 -0
  34. package/dist/mcp/error-manager.d.ts +254 -0
  35. package/dist/mcp/error-manager.js +501 -0
  36. package/dist/mcp/error-recovery.d.ts +158 -0
  37. package/dist/mcp/error-recovery.js +405 -0
  38. package/dist/mcp/health-monitor.d.ts +256 -0
  39. package/dist/mcp/health-monitor.js +621 -0
  40. package/dist/mcp/orchestrator.d.ts +136 -5
  41. package/dist/mcp/orchestrator.js +316 -9
  42. package/dist/mcp/plugins/core/neurolink-mcp.json +15 -15
  43. package/dist/mcp/registry.d.ts +22 -0
  44. package/dist/mcp/registry.js +24 -0
  45. package/dist/mcp/semaphore-manager.d.ts +137 -0
  46. package/dist/mcp/semaphore-manager.js +329 -0
  47. package/dist/mcp/session-manager.d.ts +186 -0
  48. package/dist/mcp/session-manager.js +400 -0
  49. package/dist/mcp/session-persistence.d.ts +93 -0
  50. package/dist/mcp/session-persistence.js +299 -0
  51. package/dist/mcp/transport-manager.d.ts +153 -0
  52. package/dist/mcp/transport-manager.js +331 -0
  53. package/dist/mcp/unified-registry.d.ts +42 -1
  54. package/dist/mcp/unified-registry.js +122 -2
  55. package/dist/neurolink.d.ts +75 -0
  56. package/dist/neurolink.js +104 -0
  57. package/package.json +245 -244
@@ -0,0 +1,621 @@
+ /**
+  * NeuroLink MCP Health Monitoring System
+  * Provides periodic health checks, connection status tracking, and auto-recovery
+  * Based on health monitoring patterns from Cline
+  */
+ import { ErrorCategory, ErrorSeverity } from "./error-manager.js";
+ /**
+  * Connection status states
+  */
+ export var ConnectionStatus;
+ (function (ConnectionStatus) {
+     ConnectionStatus["DISCONNECTED"] = "DISCONNECTED";
+     ConnectionStatus["CONNECTING"] = "CONNECTING";
+     ConnectionStatus["CONNECTED"] = "CONNECTED";
+     ConnectionStatus["CHECKING"] = "CHECKING";
+     ConnectionStatus["ERROR"] = "ERROR";
+     ConnectionStatus["RECOVERING"] = "RECOVERING";
+ })(ConnectionStatus || (ConnectionStatus = {}));
+ /**
+  * Ping health check - Simple availability check
+  */
+ export class PingHealthCheck {
+     name = "ping";
+     async check(serverId, registry) {
+         const startTime = Date.now();
+         try {
+             // Try to list tools as a simple ping
+             const tools = await registry.listTools();
+             const latency = Date.now() - startTime;
+             return {
+                 success: true,
+                 status: ConnectionStatus.CONNECTED,
+                 latency,
+                 timestamp: Date.now(),
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 status: ConnectionStatus.ERROR,
+                 error: error instanceof Error ? error : new Error(String(error)),
+                 timestamp: Date.now(),
+             };
+         }
+     }
+ }
+ /**
+  * Tool list validation check - Ensures tools are accessible
+  */
+ export class ToolListValidationCheck {
+     name = "tool-validation";
+     async check(serverId, registry) {
+         const startTime = Date.now();
+         try {
+             const tools = await registry.listTools();
+             const latency = Date.now() - startTime;
+             if (!tools || tools.length === 0) {
+                 return {
+                     success: false,
+                     status: ConnectionStatus.ERROR,
+                     message: "No tools available from server",
+                     latency,
+                     timestamp: Date.now(),
+                 };
+             }
+             return {
+                 success: true,
+                 status: ConnectionStatus.CONNECTED,
+                 message: `${tools.length} tools available`,
+                 latency,
+                 timestamp: Date.now(),
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 status: ConnectionStatus.ERROR,
+                 error: error instanceof Error ? error : new Error(String(error)),
+                 timestamp: Date.now(),
+             };
+         }
+     }
+ }
+ /**
+  * Performance baseline check - Monitors response times
+  */
+ export class PerformanceCheck {
+     name = "performance";
+     performanceThreshold;
+     constructor(thresholdMs = 1000) {
+         this.performanceThreshold = thresholdMs;
+     }
+     async check(serverId, registry) {
+         const startTime = Date.now();
+         try {
+             const tools = await registry.listTools();
+             const latency = Date.now() - startTime;
+             if (latency > this.performanceThreshold) {
+                 return {
+                     success: true, // Still successful, just slow
+                     status: ConnectionStatus.CONNECTED,
+                     message: `Performance degraded: ${latency}ms (threshold: ${this.performanceThreshold}ms)`,
+                     latency,
+                     timestamp: Date.now(),
+                 };
+             }
+             return {
+                 success: true,
+                 status: ConnectionStatus.CONNECTED,
+                 message: `Performance normal: ${latency}ms`,
+                 latency,
+                 timestamp: Date.now(),
+             };
+         }
+         catch (error) {
+             return {
+                 success: false,
+                 status: ConnectionStatus.ERROR,
+                 error: error instanceof Error ? error : new Error(String(error)),
+                 timestamp: Date.now(),
+             };
+         }
+     }
+ }
+ /**
+  * Health Monitor for MCP connections
+  */
+ export class HealthMonitor {
+     registry;
+     errorManager;
+     serverHealth = new Map();
+     checkInterval;
+     checkTimeout;
+     maxRecoveryAttempts;
+     recoveryDelay;
+     enableAutoRecovery;
+     checkTimers = new Map();
+     strategies = new Map();
+     isMonitoring = false;
+     recoveryCallbacks = new Map();
+     constructor(registry, errorManager, options = {}) {
+         this.registry = registry;
+         this.errorManager = errorManager;
+         this.checkInterval = options.checkInterval || 30000; // 30 seconds
+         this.checkTimeout = options.checkTimeout || 5000; // 5 seconds
+         this.maxRecoveryAttempts = options.maxRecoveryAttempts || 3;
+         this.recoveryDelay = options.recoveryDelay || 5000; // 5 seconds
+         this.enableAutoRecovery = options.enableAutoRecovery ?? true;
+         // Initialize default strategies
+         this.strategies.set("ping", new PingHealthCheck());
+         this.strategies.set("tool-validation", new ToolListValidationCheck());
+         this.strategies.set("performance", new PerformanceCheck());
+     }
+     /**
+      * Start monitoring all registered servers
+      */
+     startMonitoring() {
+         if (this.isMonitoring) {
+             return;
+         }
+         this.isMonitoring = true;
+         const servers = this.registry.listServers();
+         if (process.env.NEUROLINK_DEBUG === "true") {
+             console.log(`[HealthMonitor] Starting monitoring for ${servers.length} servers`);
+         }
+         // Initialize health tracking for each server
+         servers.forEach((serverId) => {
+             if (!this.serverHealth.has(serverId)) {
+                 this.serverHealth.set(serverId, {
+                     serverId,
+                     status: ConnectionStatus.DISCONNECTED,
+                     checkCount: 0,
+                     errorCount: 0,
+                     recoveryAttempts: 0,
+                 });
+             }
+             // Start periodic checks
+             this.scheduleHealthCheck(serverId);
+         });
+     }
+     /**
+      * Stop monitoring all servers
+      */
+     stopMonitoring() {
+         this.isMonitoring = false;
+         // Clear all timers
+         this.checkTimers.forEach((timer) => clearTimeout(timer));
+         this.checkTimers.clear();
+         if (process.env.NEUROLINK_DEBUG === "true") {
+             console.log("[HealthMonitor] Stopped monitoring");
+         }
+     }
+     /**
+      * Perform health check for a specific server
+      *
+      * @param serverId Server to check
+      * @param strategy Strategy name to use (default: "ping")
+      * @returns Health check result
+      */
+     async checkServerHealth(serverId, strategy = "ping") {
+         const health = this.serverHealth.get(serverId) || {
+             serverId,
+             status: ConnectionStatus.DISCONNECTED,
+             checkCount: 0,
+             errorCount: 0,
+             recoveryAttempts: 0,
+         };
+         // Update status to checking
+         health.status = ConnectionStatus.CHECKING;
+         this.serverHealth.set(serverId, health);
+         // Get strategy
+         const checkStrategy = this.strategies.get(strategy);
+         if (!checkStrategy) {
+             return {
+                 success: false,
+                 status: ConnectionStatus.ERROR,
+                 error: new Error(`Unknown health check strategy: ${strategy}`),
+                 timestamp: Date.now(),
+             };
+         }
+         // Perform check with timeout
+         const timeoutPromise = new Promise((_, reject) => {
+             setTimeout(() => reject(new Error("Health check timeout")), this.checkTimeout);
+         });
+         try {
+             const result = await Promise.race([
+                 checkStrategy.check(serverId, this.registry),
+                 timeoutPromise,
+             ]);
+             // Update health status
+             health.checkCount++;
+             health.lastCheck = result;
+             if (result.success) {
+                 health.status = ConnectionStatus.CONNECTED;
+                 health.lastSuccessfulCheck = Date.now();
+                 health.errorCount = 0;
+                 health.recoveryAttempts = 0;
+             }
+             else {
+                 health.status = ConnectionStatus.ERROR;
+                 health.errorCount++;
+                 // Record error
+                 this.errorManager.recordError(result.error || new Error("Health check failed"), {
+                     category: ErrorCategory.NETWORK_ERROR,
+                     severity: ErrorSeverity.HIGH,
+                     toolName: `health-check-${serverId}`,
+                 });
+             }
+             this.serverHealth.set(serverId, health);
+             // Trigger recovery if enabled and failed
+             if (!result.success &&
+                 this.enableAutoRecovery &&
+                 health.recoveryAttempts < this.maxRecoveryAttempts) {
+                 // Schedule recovery after returning result
+                 setTimeout(() => this.triggerRecovery(serverId), 0);
+             }
+             return result;
+         }
+         catch (error) {
+             // Handle timeout or other errors
+             const errorResult = {
+                 success: false,
+                 status: ConnectionStatus.ERROR,
+                 error: error instanceof Error ? error : new Error(String(error)),
+                 timestamp: Date.now(),
+             };
+             health.status = ConnectionStatus.ERROR;
+             health.errorCount++;
+             health.lastCheck = errorResult;
+             this.serverHealth.set(serverId, health);
+             // Record error
+             this.errorManager.recordError(error, {
+                 category: ErrorCategory.TIMEOUT_ERROR,
+                 severity: ErrorSeverity.HIGH,
+                 toolName: `health-check-${serverId}`,
+             });
+             return errorResult;
+         }
+     }
+     /**
+      * Get health status for all servers
+      *
+      * @returns Map of server health information
+      */
+     getHealthStatus() {
+         return new Map(this.serverHealth);
+     }
+     /**
+      * Get health status for a specific server
+      *
+      * @param serverId Server ID
+      * @returns Server health information or null
+      */
+     getServerHealth(serverId) {
+         return this.serverHealth.get(serverId) || null;
+     }
+     /**
+      * Register a recovery callback for a server
+      *
+      * @param serverId Server ID
+      * @param callback Recovery callback function
+      */
+     registerRecoveryCallback(serverId, callback) {
+         this.recoveryCallbacks.set(serverId, callback);
+     }
+     /**
+      * Add a custom health check strategy
+      *
+      * @param strategy Health check strategy
+      */
+     addStrategy(strategy) {
+         this.strategies.set(strategy.name, strategy);
+     }
+     /**
+      * Schedule periodic health check for a server
+      *
+      * @private
+      */
+     scheduleHealthCheck(serverId) {
+         if (!this.isMonitoring) {
+             return;
+         }
+         // Clear existing timer if any
+         const existingTimer = this.checkTimers.get(serverId);
+         if (existingTimer) {
+             clearTimeout(existingTimer);
+         }
+         // Schedule next check
+         const timer = setTimeout(async () => {
+             await this.checkServerHealth(serverId);
+             // Reschedule if still monitoring
+             if (this.isMonitoring) {
+                 this.scheduleHealthCheck(serverId);
+             }
+         }, this.checkInterval);
+         this.checkTimers.set(serverId, timer);
+         // Update next check time
+         const health = this.serverHealth.get(serverId);
+         if (health) {
+             health.nextCheckTime = Date.now() + this.checkInterval;
+             this.serverHealth.set(serverId, health);
+         }
+     }
+     /**
+      * Trigger recovery for a server
+      *
+      * @private
+      */
+     async triggerRecovery(serverId) {
+         const health = this.serverHealth.get(serverId);
+         if (!health) {
+             return;
+         }
+         health.status = ConnectionStatus.RECOVERING;
+         health.recoveryAttempts++;
+         this.serverHealth.set(serverId, health);
+         if (process.env.NEUROLINK_DEBUG === "true") {
+             console.log(`[HealthMonitor] Triggering recovery for ${serverId} (attempt ${health.recoveryAttempts}/${this.maxRecoveryAttempts})`);
+         }
+         // Use exponential backoff for recovery delay
+         const delay = this.recoveryDelay * Math.pow(2, health.recoveryAttempts - 1);
+         setTimeout(async () => {
+             // Call custom recovery callback if registered
+             const callback = this.recoveryCallbacks.get(serverId);
+             if (callback) {
+                 try {
+                     await callback(serverId);
+                     // Perform immediate health check after recovery
+                     const result = await this.checkServerHealth(serverId);
+                     if (result.success) {
+                         if (process.env.NEUROLINK_DEBUG === "true") {
+                             console.log(`[HealthMonitor] Recovery successful for ${serverId}`);
+                         }
+                     }
+                 }
+                 catch (error) {
+                     this.errorManager.recordError(error, {
+                         category: ErrorCategory.UNKNOWN_ERROR,
+                         severity: ErrorSeverity.CRITICAL,
+                         toolName: `recovery-${serverId}`,
+                     });
+                 }
+             }
+         }, delay);
+     }
+     /**
+      * Generate comprehensive health report
+      *
+      * @returns Health report with server statuses and metrics
+      */
+     generateHealthReport() {
+         const servers = Array.from(this.serverHealth.values());
+         const now = Date.now();
+         // Calculate summary
+         const summary = {
+             totalServers: servers.length,
+             healthyServers: servers.filter((s) => s.status === ConnectionStatus.CONNECTED).length,
+             unhealthyServers: servers.filter((s) => s.status === ConnectionStatus.ERROR).length,
+             recoveringServers: servers.filter((s) => s.status === ConnectionStatus.RECOVERING).length,
+             overallHealth: 0,
+         };
+         // Calculate server details
+         const serverReports = servers.map((server) => {
+             const successRate = server.checkCount > 0
+                 ? ((server.checkCount - server.errorCount) / server.checkCount) * 100
+                 : 0;
+             const uptime = server.lastSuccessfulCheck && server.checkCount > 0
+                 ? ((now - server.lastSuccessfulCheck) /
+                     (server.checkCount * this.checkInterval)) *
+                     100
+                 : 0;
+             // Calculate average latency from recent checks
+             const avgLatency = server.lastCheck?.latency || 0;
+             // Calculate health score (0-100)
+             let health = 100;
+             health -= server.errorCount * 10; // -10 per error
+             health -= server.recoveryAttempts * 5; // -5 per recovery attempt
+             if (server.status === ConnectionStatus.ERROR) {
+                 health -= 50;
+             }
+             if (server.status === ConnectionStatus.RECOVERING) {
+                 health -= 25;
+             }
+             health = Math.max(0, Math.min(100, health));
+             return {
+                 serverId: server.serverId,
+                 status: server.status,
+                 health,
+                 uptime: Math.min(100, uptime),
+                 avgLatency,
+                 lastError: server.lastCheck?.error?.message,
+                 metrics: {
+                     totalChecks: server.checkCount,
+                     successfulChecks: server.checkCount - server.errorCount,
+                     failedChecks: server.errorCount,
+                     recoveryAttempts: server.recoveryAttempts,
+                 },
+             };
+         });
+         // Calculate overall health
+         summary.overallHealth =
+             serverReports.length > 0
+                 ? Math.round(serverReports.reduce((sum, s) => sum + s.health, 0) /
+                     serverReports.length)
+                 : 100;
+         // Generate trends (simplified for now)
+         const trends = {
+             healthHistory: this.getHealthHistory(),
+             errorRate: this.calculateErrorRate(),
+             avgRecoveryTime: this.calculateAvgRecoveryTime(),
+         };
+         // Generate recommendations
+         const recommendations = this.generateRecommendations(summary, serverReports);
+         return {
+             summary,
+             servers: serverReports,
+             trends,
+             recommendations,
+         };
+     }
+     /**
+      * Get health metrics for monitoring dashboards
+      *
+      * @returns Simplified metrics for real-time monitoring
+      */
+     getHealthMetrics() {
+         const report = this.generateHealthReport();
+         // Determine overall status
+         let status;
+         if (report.summary.overallHealth >= 80) {
+             status = "healthy";
+         }
+         else if (report.summary.overallHealth >= 50) {
+             status = "degraded";
+         }
+         else {
+             status = "critical";
+         }
+         // Generate active alerts
+         const activeAlerts = [];
+         this.serverHealth.forEach((health, serverId) => {
+             if (health.status === ConnectionStatus.ERROR) {
+                 activeAlerts.push({
+                     serverId,
+                     severity: health.errorCount > 5 ? "critical" : "high",
+                     message: health.lastCheck?.error?.message || "Server unreachable",
+                     timestamp: health.lastCheck?.timestamp || Date.now(),
+                 });
+             }
+             else if (health.status === ConnectionStatus.RECOVERING) {
+                 activeAlerts.push({
+                     serverId,
+                     severity: "medium",
+                     message: `Recovery attempt ${health.recoveryAttempts}/${this.maxRecoveryAttempts}`,
+                     timestamp: Date.now(),
+                 });
+             }
+         });
+         // Calculate performance metrics
+         const latencies = report.servers
+             .map((s) => s.avgLatency)
+             .filter((l) => l > 0);
+         const performance = {
+             avgLatency: latencies.length > 0
+                 ? Math.round(latencies.reduce((sum, l) => sum + l, 0) / latencies.length)
+                 : 0,
+             maxLatency: latencies.length > 0 ? Math.max(...latencies) : 0,
+             successRate: report.servers.length > 0
+                 ? (report.summary.healthyServers / report.summary.totalServers) * 100
+                 : 100,
+         };
+         // Build server status map
+         const serverStatuses = {};
+         this.serverHealth.forEach((health, serverId) => {
+             serverStatuses[serverId] = health.status;
+         });
+         return {
+             status,
+             healthScore: report.summary.overallHealth,
+             activeAlerts,
+             serverStatuses,
+             performance,
+         };
+     }
+     /**
+      * Subscribe to health events
+      *
+      * @param event Event type to subscribe to
+      * @param callback Callback function
+      */
+     on(event, callback) {
+         // Implementation would use EventEmitter
+         // For now, just a placeholder
+     }
+     /**
+      * Get health history for trend analysis
+      *
+      * @private
+      */
+     getHealthHistory() {
+         // In a real implementation, this would track health over time
+         // For now, return current snapshot
+         const report = this.generateHealthReport();
+         return [
+             {
+                 timestamp: Date.now(),
+                 health: report.summary.overallHealth,
+             },
+         ];
+     }
+     /**
+      * Calculate error rate
+      *
+      * @private
+      */
+     calculateErrorRate() {
+         let totalErrors = 0;
+         this.serverHealth.forEach((health) => {
+             totalErrors += health.errorCount;
+         });
+         // Errors per hour (simplified)
+         const hoursMonitored = (Date.now() - (Date.now() - this.checkInterval * 10)) / (1000 * 60 * 60);
+         return hoursMonitored > 0 ? totalErrors / hoursMonitored : 0;
+     }
+     /**
+      * Calculate average recovery time
+      *
+      * @private
+      */
+     calculateAvgRecoveryTime() {
+         // Simplified - would track actual recovery times
+         return this.recoveryDelay * 2; // Assume average of 2 attempts
+     }
+     /**
+      * Generate health recommendations
+      *
+      * @private
+      */
+     generateRecommendations(summary, servers) {
+         const recommendations = [];
+         // Check overall health
+         if (summary.overallHealth < 50) {
+             recommendations.push("Critical: System health is below 50%. Immediate attention required.");
+         }
+         // Check unhealthy servers
+         if (summary.unhealthyServers > summary.totalServers * 0.3) {
+             recommendations.push("Multiple servers are failing. Check network connectivity and server availability.");
+         }
+         // Check recovery attempts
+         const highRecoveryServers = servers.filter((s) => s.metrics.recoveryAttempts > 2);
+         if (highRecoveryServers.length > 0) {
+             recommendations.push(`Servers with repeated recovery attempts: ${highRecoveryServers.map((s) => s.serverId).join(", ")}. Consider manual intervention.`);
+         }
+         // Check latency
+         const highLatencyServers = servers.filter((s) => s.avgLatency > 1000);
+         if (highLatencyServers.length > 0) {
+             recommendations.push(`High latency detected on servers: ${highLatencyServers.map((s) => s.serverId).join(", ")}. Check server load and network conditions.`);
+         }
+         // Positive feedback
+         if (summary.overallHealth >= 90) {
+             recommendations.push("System health is excellent. All servers are operating normally.");
+         }
+         return recommendations;
+     }
+ }
+ /**
+  * Default health monitor instance (to be initialized with registry and error manager)
+  */
+ export let defaultHealthMonitor = null;
+ /**
+  * Initialize default health monitor
+  *
+  * @param registry Tool registry
+  * @param errorManager Error manager
+  * @param options Health monitor options
+  * @returns Health monitor instance
+  */
+ export function initializeHealthMonitor(registry, errorManager, options) {
+     defaultHealthMonitor = new HealthMonitor(registry, errorManager, options);
+     return defaultHealthMonitor;
+ }
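
For orientation, here is a minimal usage sketch of the health-monitoring API added in this release. It is not part of the published diff: the import path is an assumption based on the dist layout listed above, and the stub registry and error manager below implement only the members that HealthMonitor is seen calling (listServers, listTools, recordError).

// Illustrative sketch only; adjust the import to however the package exposes these modules.
import { initializeHealthMonitor } from "@juspay/neurolink/dist/mcp/health-monitor.js";

// Minimal stand-ins for the registry and error manager (hypothetical shapes).
const registry = {
    listServers: () => ["demo-server"],
    listTools: async () => [{ name: "echo" }],
};
const errorManager = {
    recordError: (error, context) => console.error("[MCP]", context.toolName, error),
};

const monitor = initializeHealthMonitor(registry, errorManager, {
    checkInterval: 15000, // poll every 15 s (default 30 s)
    checkTimeout: 3000,   // fail a check after 3 s (default 5 s)
    enableAutoRecovery: true,
});

// Optional reconnect logic, invoked with exponential backoff when checks keep failing.
monitor.registerRecoveryCallback("demo-server", async (serverId) => {
    console.log(`re-establishing connection to ${serverId}...`);
});

monitor.startMonitoring();

// One-off check with a named strategy: "ping", "tool-validation", or "performance".
const result = await monitor.checkServerHealth("demo-server", "tool-validation");
console.log(result.success, result.status, result.latency);

// Per-server state accumulated by the monitor.
console.log(monitor.getServerHealth("demo-server"));

monitor.stopMonitoring();

The default strategies registered in the constructor ("ping", "tool-validation", "performance") can be extended at runtime with addStrategy(), which keys each strategy by its name property.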