@juspay/neurolink 7.33.1 → 7.33.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/constants/index.d.ts +192 -0
  3. package/dist/constants/index.js +195 -0
  4. package/dist/constants/performance.d.ts +366 -0
  5. package/dist/constants/performance.js +389 -0
  6. package/dist/constants/retry.d.ts +224 -0
  7. package/dist/constants/retry.js +266 -0
  8. package/dist/constants/timeouts.d.ts +225 -0
  9. package/dist/constants/timeouts.js +182 -0
  10. package/dist/constants/tokens.d.ts +234 -0
  11. package/dist/constants/tokens.js +314 -0
  12. package/dist/core/types.d.ts +268 -0
  13. package/dist/core/types.js +153 -0
  14. package/dist/lib/constants/index.d.ts +192 -0
  15. package/dist/lib/constants/index.js +195 -0
  16. package/dist/lib/constants/performance.d.ts +366 -0
  17. package/dist/lib/constants/performance.js +389 -0
  18. package/dist/lib/constants/retry.d.ts +224 -0
  19. package/dist/lib/constants/retry.js +266 -0
  20. package/dist/lib/constants/timeouts.d.ts +225 -0
  21. package/dist/lib/constants/timeouts.js +182 -0
  22. package/dist/lib/constants/tokens.d.ts +234 -0
  23. package/dist/lib/constants/tokens.js +314 -0
  24. package/dist/lib/core/types.d.ts +268 -0
  25. package/dist/lib/core/types.js +153 -0
  26. package/dist/lib/mcp/externalServerManager.d.ts +18 -3
  27. package/dist/lib/mcp/externalServerManager.js +125 -3
  28. package/dist/lib/models/modelRegistry.d.ts +1 -1
  29. package/dist/lib/models/modelRegistry.js +63 -37
  30. package/dist/lib/neurolink.d.ts +1 -1
  31. package/dist/lib/neurolink.js +38 -36
  32. package/dist/lib/providers/azureOpenai.d.ts +1 -1
  33. package/dist/lib/providers/azureOpenai.js +2 -1
  34. package/dist/lib/utils/providerConfig.d.ts +25 -0
  35. package/dist/lib/utils/providerConfig.js +24 -3
  36. package/dist/lib/utils/providerHealth.d.ts +1 -1
  37. package/dist/lib/utils/providerHealth.js +40 -33
  38. package/dist/lib/utils/providerSetupMessages.js +7 -6
  39. package/dist/lib/utils/providerUtils.js +16 -24
  40. package/dist/mcp/externalServerManager.d.ts +18 -3
  41. package/dist/mcp/externalServerManager.js +125 -3
  42. package/dist/models/modelRegistry.d.ts +1 -1
  43. package/dist/models/modelRegistry.js +63 -37
  44. package/dist/neurolink.d.ts +1 -1
  45. package/dist/neurolink.js +38 -36
  46. package/dist/providers/azureOpenai.d.ts +1 -1
  47. package/dist/providers/azureOpenai.js +2 -1
  48. package/dist/utils/providerConfig.d.ts +25 -0
  49. package/dist/utils/providerConfig.js +24 -3
  50. package/dist/utils/providerHealth.d.ts +1 -1
  51. package/dist/utils/providerHealth.js +40 -33
  52. package/dist/utils/providerSetupMessages.js +7 -6
  53. package/dist/utils/providerUtils.js +16 -24
  54. package/package.json +1 -1
package/dist/neurolink.js CHANGED
@@ -16,6 +16,7 @@ catch {
  import { AIProviderFactory } from "./core/factory.js";
  import { mcpLogger } from "./utils/logger.js";
  import { SYSTEM_LIMITS } from "./core/constants.js";
+ import { NANOSECOND_TO_MS_DIVISOR, MCP_TIMEOUTS, SERVER_CONFIG, TOOL_TIMEOUTS, RETRY_ATTEMPTS, RETRY_DELAYS, CIRCUIT_BREAKER, CIRCUIT_BREAKER_RESET_MS, MEMORY_THRESHOLDS, PROVIDER_TIMEOUTS, PERFORMANCE_THRESHOLDS, } from "./constants/index.js";
  import pLimit from "p-limit";
  import { toolRegistry } from "./mcp/toolRegistry.js";
  import { logger } from "./utils/logger.js";
@@ -164,7 +165,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  registrySetupDurationNs: registrySetupDurationNs.toString(),
- registrySetupDurationMs: Number(registrySetupDurationNs) / 1000000,
+ registrySetupDurationMs: Number(registrySetupDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  enableManualMCP: false,
  message: "ProviderRegistry configured successfully with security settings",
  });
@@ -179,7 +180,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  registrySetupDurationNs: registrySetupDurationNs.toString(),
- registrySetupDurationMs: Number(registrySetupDurationNs) / 1000000,
+ registrySetupDurationMs: Number(registrySetupDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  error: error instanceof Error ? error.message : String(error),
  errorName: error instanceof Error ? error.name : "UnknownError",
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -224,9 +225,9 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  memoryInitDurationNs: memoryInitDurationNs.toString(),
- memoryInitDurationMs: Number(memoryInitDurationNs) / 1000000,
+ memoryInitDurationMs: Number(memoryInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  memoryManagerCreateDurationNs: memoryManagerCreateDurationNs.toString(),
- memoryManagerCreateDurationMs: Number(memoryManagerCreateDurationNs) / 1000000,
+ memoryManagerCreateDurationMs: Number(memoryManagerCreateDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  finalMemoryConfig: {
  maxSessions: memoryConfig.maxSessions,
  maxTurnsPerSession: memoryConfig.maxTurnsPerSession,
@@ -245,7 +246,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  memoryInitDurationNs: memoryInitDurationNs.toString(),
- memoryInitDurationMs: Number(memoryInitDurationNs) / 1000000,
+ memoryInitDurationMs: Number(memoryInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  error: error instanceof Error ? error.message : String(error),
  errorName: error instanceof Error ? error.name : "UnknownError",
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -289,8 +290,8 @@ export class NeuroLink {
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  externalServerInitStartTimeNs: externalServerInitStartTime.toString(),
  serverManagerConfig: {
- maxServers: 20,
- defaultTimeout: 15000,
+ maxServers: SERVER_CONFIG.MAX_MCP_SERVERS,
+ defaultTimeout: MCP_TIMEOUTS.EXTERNAL_SERVER_STARTUP_MS,
  enableAutoRestart: true,
  enablePerformanceMonitoring: true,
  },
@@ -317,7 +318,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  externalServerInitDurationNs: externalServerInitDurationNs.toString(),
- externalServerInitDurationMs: Number(externalServerInitDurationNs) / 1000000,
+ externalServerInitDurationMs: Number(externalServerInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  hasExternalServerManager: !!this.externalServerManager,
  message: "External server manager initialized successfully",
  });
@@ -333,7 +334,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  externalServerInitDurationNs: externalServerInitDurationNs.toString(),
- externalServerInitDurationMs: Number(externalServerInitDurationNs) / 1000000,
+ externalServerInitDurationMs: Number(externalServerInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  error: error instanceof Error ? error.message : String(error),
  errorName: error instanceof Error ? error.name : "UnknownError",
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -418,7 +419,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - constructorStartTime,
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
  eventHandlerSetupDurationNs: eventHandlerSetupDurationNs.toString(),
- eventHandlerSetupDurationMs: Number(eventHandlerSetupDurationNs) / 1000000,
+ eventHandlerSetupDurationMs: Number(eventHandlerSetupDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  eventHandlersCount: 5,
  eventHandlerTypes: [
  "connected",
@@ -441,7 +442,7 @@ export class NeuroLink {
  constructorId,
  timestamp: new Date().toISOString(),
  constructorDurationNs: constructorDurationNs.toString(),
- constructorDurationMs: Number(constructorDurationNs) / 1000000,
+ constructorDurationMs: Number(constructorDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  totalElapsedMs: Date.now() - constructorStartTime,
  finalState: {
  hasConversationMemory: !!this.conversationMemory,
@@ -542,7 +543,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  performanceImportDurationNs: performanceImportDurationNs.toString(),
- performanceImportDurationMs: Number(performanceImportDurationNs) / 1000000,
+ performanceImportDurationMs: Number(performanceImportDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  hasMemoryManager: !!MemoryManager,
  message: "MemoryManager imported successfully",
  });
@@ -558,7 +559,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  performanceImportDurationNs: performanceImportDurationNs.toString(),
- performanceImportDurationMs: Number(performanceImportDurationNs) / 1000000,
+ performanceImportDurationMs: Number(performanceImportDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  error: error instanceof Error ? error.message : String(error),
  errorName: error instanceof Error ? error.name : "UnknownError",
  message: "MemoryManager import failed - continuing without performance tracking",
@@ -590,7 +591,7 @@ export class NeuroLink {
  */
  async initializeToolRegistryInternal(mcpInitId, mcpInitStartTime, mcpInitHrTimeStart) {
  const toolRegistryStartTime = process.hrtime.bigint();
- const initTimeout = 3000;
+ const initTimeout = MCP_TIMEOUTS.INITIALIZATION_MS;
  logger.debug(`[NeuroLink] ⏱️ LOG_POINT_M007_TOOL_REGISTRY_TIMEOUT_SETUP`, {
  logPoint: "M007_TOOL_REGISTRY_TIMEOUT_SETUP",
  mcpInitId,
@@ -616,7 +617,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  toolRegistryDurationNs: toolRegistryDurationNs.toString(),
- toolRegistryDurationMs: Number(toolRegistryDurationNs) / 1000000,
+ toolRegistryDurationMs: Number(toolRegistryDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  message: "Tool registry initialization completed within timeout",
  });
  }
@@ -644,7 +645,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  providerRegistryDurationNs: providerRegistryDurationNs.toString(),
- providerRegistryDurationMs: Number(providerRegistryDurationNs) / 1000000,
+ providerRegistryDurationMs: Number(providerRegistryDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  message: "Provider registry registration completed successfully",
  });
  }
@@ -674,7 +675,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  directToolsDurationNs: directToolsDurationNs.toString(),
- directToolsDurationMs: Number(directToolsDurationNs) / 1000000,
+ directToolsDurationMs: Number(directToolsDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  serverId: "neurolink-direct",
  message: "Direct tools server registered successfully",
  });
@@ -692,7 +693,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  directToolsDurationNs: directToolsDurationNs.toString(),
- directToolsDurationMs: Number(directToolsDurationNs) / 1000000,
+ directToolsDurationMs: Number(directToolsDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  error: error instanceof Error ? error.message : String(error),
  errorName: error instanceof Error ? error.name : "UnknownError",
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -705,7 +706,7 @@ export class NeuroLink {
  }
  }
  /**
- * Load MCP configuration from .mcp-config.json
+ * Load MCP configuration from .mcp-config.json with parallel loading for improved performance
  */
  async loadMCPConfigurationInternal(mcpInitId, mcpInitStartTime, mcpInitHrTimeStart) {
  const mcpConfigStartTime = process.hrtime.bigint();
@@ -720,7 +721,8 @@ export class NeuroLink {
  message: "Starting MCP configuration loading from .mcp-config.json",
  });
  try {
- const configResult = await this.externalServerManager.loadMCPConfiguration();
+ const configResult = await this.externalServerManager.loadMCPConfiguration(undefined, // Use default config path
+ { parallel: true });
  const mcpConfigSuccessTime = process.hrtime.bigint();
  const mcpConfigDurationNs = mcpConfigSuccessTime - mcpConfigStartTime;
  logger.debug(`[NeuroLink] ✅ LOG_POINT_M015_MCP_CONFIG_SUCCESS`, {
@@ -730,7 +732,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - mcpInitStartTime,
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
  mcpConfigDurationNs: mcpConfigDurationNs.toString(),
- mcpConfigDurationMs: Number(mcpConfigDurationNs) / 1000000,
+ mcpConfigDurationMs: Number(mcpConfigDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  serversLoaded: configResult.serversLoaded,
  errorsCount: configResult.errors.length,
  configResult: {
@@ -773,7 +775,7 @@ export class NeuroLink {
  initTime: `${initTime}ms`,
  memoryUsed: `${memoryDelta}MB`,
  });
- if (memoryDelta > 30) {
+ if (memoryDelta > MEMORY_THRESHOLDS.MODERATE_USAGE_MB) {
  mcpLogger.debug("💡 Memory cleanup suggestion: MCP initialization used significant memory. Consider calling MemoryManager.forceGC() after heavy operations.");
  }
  }
@@ -1119,7 +1121,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - generateInternalStartTime,
  elapsedNs: (process.hrtime.bigint() - generateInternalHrTimeStart).toString(),
  conversationMemoryDurationNs: conversationMemoryDurationNs.toString(),
- conversationMemoryDurationMs: Number(conversationMemoryDurationNs) / 1000000,
+ conversationMemoryDurationMs: Number(conversationMemoryDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  message: "Conversation memory initialization completed successfully",
  });
  }
@@ -1164,7 +1166,7 @@ export class NeuroLink {
  * Perform MCP generation with retry logic
  */
  async performMCPGenerationRetries(options, generateInternalId, generateInternalStartTime, generateInternalHrTimeStart, functionTag) {
- const maxMcpRetries = 2;
+ const maxMcpRetries = RETRY_ATTEMPTS.QUICK;
  const mcpRetryLoopStartTime = process.hrtime.bigint();
  logger.debug(`[NeuroLink] 🔄 LOG_POINT_G006_MCP_RETRY_LOOP_START`, {
  logPoint: "G006_MCP_RETRY_LOOP_START",
@@ -1206,7 +1208,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - generateInternalStartTime,
  elapsedNs: (process.hrtime.bigint() - generateInternalHrTimeStart).toString(),
  mcpAttemptDurationNs: mcpAttemptDurationNs.toString(),
- mcpAttemptDurationMs: Number(mcpAttemptDurationNs) / 1000000,
+ mcpAttemptDurationMs: Number(mcpAttemptDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  currentAttempt: attempt,
  resultAnalysis: {
  hasResult: !!mcpResult,
@@ -1325,7 +1327,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - tryMCPStartTime,
  elapsedNs: (process.hrtime.bigint() - tryMCPHrTimeStart).toString(),
  mcpInitCheckDurationNs: mcpInitCheckDurationNs.toString(),
- mcpInitCheckDurationMs: Number(mcpInitCheckDurationNs) / 1000000,
+ mcpInitCheckDurationMs: Number(mcpInitCheckDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  mcpInitializedAfter: this.mcpInitialized,
  initializationSuccessful: this.mcpInitialized,
  message: "MCP initialization check completed",
@@ -1781,7 +1783,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - startTime,
  elapsedNs: (process.hrtime.bigint() - hrTimeStart).toString(),
  validationDurationNs: validationDurationNs.toString(),
- validationDurationMs: Number(validationDurationNs) / 1000000,
+ validationDurationMs: Number(validationDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  validationError: "Stream options must include either input.text or input.audio",
  message: "EXHAUSTIVE validation failure analysis with character-level debugging",
  });
@@ -1796,7 +1798,7 @@ export class NeuroLink {
  elapsedMs: Date.now() - startTime,
  elapsedNs: (process.hrtime.bigint() - hrTimeStart).toString(),
  validationDurationNs: validationDurationNs.toString(),
- validationDurationMs: Number(validationDurationNs) / 1000000,
+ validationDurationMs: Number(validationDurationNs) / NANOSECOND_TO_MS_DIVISOR,
  inputTextValid: hasText,
  inputAudioPresent: hasAudio,
  inputTextLength: hasText ? options.input.text.length : 0,
@@ -2314,16 +2316,16 @@ export class NeuroLink {
  this.emitter.emit("tool:start", toolName, params);
  // Set default options
  const finalOptions = {
- timeout: options?.timeout || 30000, // 30 second default timeout
- maxRetries: options?.maxRetries || 2, // Default 2 retries for retriable errors
- retryDelayMs: options?.retryDelayMs || 1000, // 1 second delay between retries
+ timeout: options?.timeout || TOOL_TIMEOUTS.EXECUTION_DEFAULT_MS, // 30 second default timeout
+ maxRetries: options?.maxRetries || RETRY_ATTEMPTS.DEFAULT, // Default 2 retries for retriable errors
+ retryDelayMs: options?.retryDelayMs || RETRY_DELAYS.BASE_MS, // 1 second delay between retries
  };
  // Track memory usage for tool execution
  const { MemoryManager } = await import("./utils/performance.js");
  const startMemory = MemoryManager.getMemoryUsageMB();
  // Get or create circuit breaker for this tool
  if (!this.toolCircuitBreakers.has(toolName)) {
- this.toolCircuitBreakers.set(toolName, new CircuitBreaker(5, 60000)); // 5 failures, 1 minute timeout
+ this.toolCircuitBreakers.set(toolName, new CircuitBreaker(CIRCUIT_BREAKER.FAILURE_THRESHOLD, CIRCUIT_BREAKER_RESET_MS));
  }
  const circuitBreaker = this.toolCircuitBreakers.get(toolName);
  // Initialize metrics for this tool if not exists
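
Note: the two CircuitBreaker constructor arguments above were previously the literals 5 and 60000 ("5 failures, 1 minute timeout"). A minimal sketch of what those two arguments govern, assuming CIRCUIT_BREAKER.FAILURE_THRESHOLD and CIRCUIT_BREAKER_RESET_MS keep the same values as the literals they replace; the package's actual CircuitBreaker class is not shown in this diff and may differ:

// Sketch only: illustrates the role of the two constructor arguments,
// not the real CircuitBreaker implementation in @juspay/neurolink.
class CircuitBreakerSketch {
  private failures = 0;
  private openedAt: number | null = null;

  constructor(
    private readonly failureThreshold: number, // e.g. CIRCUIT_BREAKER.FAILURE_THRESHOLD (5)
    private readonly resetMs: number, // e.g. CIRCUIT_BREAKER_RESET_MS (60000)
  ) {}

  canExecute(): boolean {
    // Re-close the breaker once the reset window has elapsed.
    if (this.openedAt !== null && Date.now() - this.openedAt >= this.resetMs) {
      this.failures = 0;
      this.openedAt = null;
    }
    return this.openedAt === null;
  }

  recordFailure(): void {
    this.failures += 1;
    if (this.failures >= this.failureThreshold) {
      this.openedAt = Date.now(); // open: reject calls until resetMs passes
    }
  }

  recordSuccess(): void {
    this.failures = 0;
  }
}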
@@ -2646,10 +2648,10 @@ export class NeuroLink {
  // Check memory usage after tool enumeration
  const endMemory = MemoryManager.getMemoryUsageMB();
  const memoryDelta = endMemory.heapUsed - startMemory.heapUsed;
- if (memoryDelta > 10) {
+ if (memoryDelta > MEMORY_THRESHOLDS.LOW_USAGE_MB) {
  mcpLogger.debug(`🔍 Tool listing used ${memoryDelta}MB memory (large tool registry detected)`);
  // Optimized collection patterns should reduce memory usage significantly
- if (uniqueTools.length > 100) {
+ if (uniqueTools.length > PERFORMANCE_THRESHOLDS.LARGE_TOOL_COLLECTION) {
  mcpLogger.debug("💡 Tool collection optimized for large sets. Memory usage reduced through efficient object reuse.");
  }
  }
@@ -2721,7 +2723,7 @@ export class NeuroLink {
  try {
  const response = await fetch("http://localhost:11434/api/tags", {
  method: "GET",
- signal: AbortSignal.timeout(2000),
+ signal: AbortSignal.timeout(PROVIDER_TIMEOUTS.AUTH_MS),
  });
  if (!response.ok) {
  throw new Error("Ollama service not responding");
@@ -3098,7 +3100,7 @@ export class NeuroLink {
  resetToolCircuitBreaker(toolName) {
  if (this.toolCircuitBreakers.has(toolName)) {
  // Create a new circuit breaker (effectively resets it)
- this.toolCircuitBreakers.set(toolName, new CircuitBreaker(5, 60000));
+ this.toolCircuitBreakers.set(toolName, new CircuitBreaker(CIRCUIT_BREAKER.FAILURE_THRESHOLD, CIRCUIT_BREAKER_RESET_MS));
  mcpLogger.info(`Circuit breaker reset for tool: ${toolName}`);
  }
  }
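
Note: every duration change in this file follows the same pattern: a process.hrtime.bigint() delta in nanoseconds is divided by NANOSECOND_TO_MS_DIVISOR to report milliseconds. A minimal sketch, assuming the constant equals 1_000_000 (the literal it replaces):

// Sketch: how the hrtime-based duration logging above converts ns to ms.
// NANOSECOND_TO_MS_DIVISOR is assumed to be 1_000_000, matching the replaced literal.
const NANOSECOND_TO_MS_DIVISOR = 1_000_000;

const start = process.hrtime.bigint();
// ... work being timed ...
const durationNs = process.hrtime.bigint() - start;
const durationMs = Number(durationNs) / NANOSECOND_TO_MS_DIVISOR;
console.log({ durationNs: durationNs.toString(), durationMs });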

package/dist/providers/azureOpenai.d.ts CHANGED
@@ -1,6 +1,6 @@
  import { type LanguageModelV1 } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
- import type { AIProviderName } from "../types/index.js";
+ import type { AIProviderName } from "../core/types.js";
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
  export declare class AzureOpenAIProvider extends BaseProvider {
  private apiKey;

package/dist/providers/azureOpenai.js CHANGED
@@ -1,6 +1,7 @@
  import { createAzure } from "@ai-sdk/azure";
  import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
+ import { APIVersions } from "../core/types.js";
  import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
  import { logger } from "../utils/logger.js";
  import { buildMessagesArray } from "../utils/messageBuilder.js";
@@ -24,7 +25,7 @@ export class AzureOpenAIProvider extends BaseProvider {
  process.env.AZURE_OPENAI_DEPLOYMENT ||
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
  "gpt-4o";
- this.apiVersion = process.env.AZURE_API_VERSION || "2024-10-01-preview";
+ this.apiVersion = process.env.AZURE_API_VERSION || APIVersions.AZURE_LATEST;
  // Configuration validation - now using consolidated utility
  if (!this.apiKey) {
  validateApiKey(createAzureAPIKeyConfig());

package/dist/utils/providerConfig.d.ts CHANGED
@@ -15,6 +15,31 @@ export interface ProviderConfigOptions {
  instructions: string[];
  fallbackEnvVars?: string[];
  }
+ /**
+ * API key format validation patterns (extracted from advanced validation system)
+ * Exported for use across the codebase to replace scattered regex patterns
+ */
+ export declare const API_KEY_FORMATS: Record<string, RegExp>;
+ /**
+ * API key length constants to replace scattered magic numbers
+ */
+ export declare const API_KEY_LENGTHS: {
+ readonly OPENAI_MIN: 48;
+ readonly ANTHROPIC_MIN: 95;
+ readonly HUGGINGFACE_EXACT: 37;
+ readonly AZURE_MIN: 32;
+ readonly MISTRAL_EXACT: 32;
+ readonly AWS_ACCESS_KEY: 20;
+ readonly GOOGLE_AI_EXACT: 39;
+ };
+ /**
+ * Project ID format validation (for Google Cloud)
+ */
+ export declare const PROJECT_ID_FORMAT: {
+ readonly MIN_LENGTH: 6;
+ readonly MAX_LENGTH: 30;
+ readonly PATTERN: RegExp;
+ };
  /**
  * Enhanced validation result with format checking
  */

package/dist/utils/providerConfig.js CHANGED
@@ -6,16 +6,37 @@
  */
  /**
  * API key format validation patterns (extracted from advanced validation system)
+ * Exported for use across the codebase to replace scattered regex patterns
  */
- const API_KEY_FORMATS = {
+ export const API_KEY_FORMATS = {
  openai: /^sk-[A-Za-z0-9]{48,}$/,
  anthropic: /^sk-ant-[A-Za-z0-9\-_]{95,}$/,
  "google-ai": /^AIza[A-Za-z0-9\-_]{35}$/,
  huggingface: /^hf_[A-Za-z0-9]{37}$/,
  mistral: /^[A-Za-z0-9]{32}$/,
- azure: /^[A-Za-z0-9]{32,}$/,
+ azure: /^[A-Za-z0-9]{32}$/,
  aws: /^[A-Z0-9]{20}$/, // Access Key ID format
- googleVertex: /^[A-Za-z0-9\-_]{1,}$/, // Project ID format
+ bedrock: /^[A-Z0-9]{20}$/, // AWS access key ID: 20 uppercase alphanumerics
+ };
+ /**
+ * API key length constants to replace scattered magic numbers
+ */
+ export const API_KEY_LENGTHS = {
+ OPENAI_MIN: 48, // OpenAI API keys minimum length
+ ANTHROPIC_MIN: 95, // Anthropic API keys minimum length
+ HUGGINGFACE_EXACT: 37, // HuggingFace tokens exact length
+ AZURE_MIN: 32, // Azure OpenAI API keys minimum length
+ MISTRAL_EXACT: 32, // Mistral API keys exact length
+ AWS_ACCESS_KEY: 20, // AWS access key ID exact length
+ GOOGLE_AI_EXACT: 39, // Google AI Studio keys exact length (with AIza prefix)
+ };
+ /**
+ * Project ID format validation (for Google Cloud)
+ */
+ export const PROJECT_ID_FORMAT = {
+ MIN_LENGTH: 6, // Minimum project ID length
+ MAX_LENGTH: 30, // Maximum project ID length
+ PATTERN: /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/, // Google Cloud project ID format
  };
  /**
  * Validates API key format for a specific provider
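
Note: with API_KEY_FORMATS, API_KEY_LENGTHS, and PROJECT_ID_FORMAT now exported, other modules (such as providerHealth.js below) can share one source of truth instead of scattered literals. A hedged usage sketch, assuming the relative import path shown in this diff and the exported shapes above; the helper functions are illustrative, not part of the package:

// Sketch: consuming the newly exported validation constants.
import { API_KEY_FORMATS, API_KEY_LENGTHS, PROJECT_ID_FORMAT } from "./providerConfig.js";

function looksLikeOpenAIKey(key: string): boolean {
  // Regex format check plus the exported minimum-length constant.
  return API_KEY_FORMATS.openai.test(key) && key.length >= API_KEY_LENGTHS.OPENAI_MIN;
}

function looksLikeGcpProjectId(projectId: string): boolean {
  return (
    projectId.length >= PROJECT_ID_FORMAT.MIN_LENGTH &&
    projectId.length <= PROJECT_ID_FORMAT.MAX_LENGTH &&
    PROJECT_ID_FORMAT.PATTERN.test(projectId)
  );
}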

package/dist/utils/providerHealth.d.ts CHANGED
@@ -2,7 +2,7 @@
  * Provider Health Checking System
  * Prevents 500 errors by validating provider availability and configuration
  */
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName } from "../core/types.js";
  export interface ProviderHealthStatus {
  provider: AIProviderName;
  isHealthy: boolean;

package/dist/utils/providerHealth.js CHANGED
@@ -3,7 +3,8 @@
  * Prevents 500 errors by validating provider availability and configuration
  */
  import { logger } from "./logger.js";
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, BedrockModels, } from "../core/types.js";
+ import { API_KEY_LENGTHS, PROJECT_ID_FORMAT } from "./providerConfig.js";
  import { basename } from "path";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  export class ProviderHealthChecker {
@@ -402,17 +403,19 @@ export class ProviderHealthChecker {
  static validateApiKeyFormat(providerName, apiKey) {
  switch (providerName) {
  case AIProviderName.ANTHROPIC:
- return apiKey.startsWith("sk-ant-") && apiKey.length > 20;
+ return (apiKey.startsWith("sk-ant-") &&
+ apiKey.length >= API_KEY_LENGTHS.ANTHROPIC_MIN);
  case AIProviderName.OPENAI:
- return apiKey.startsWith("sk-") && apiKey.length > 20;
+ return (apiKey.startsWith("sk-") &&
+ apiKey.length >= API_KEY_LENGTHS.OPENAI_MIN);
  case AIProviderName.GOOGLE_AI:
- return apiKey.length > 20; // Basic length check
+ return apiKey.length >= API_KEY_LENGTHS.GOOGLE_AI_EXACT; // Basic length check
  case AIProviderName.VERTEX:
  return apiKey.endsWith(".json") || apiKey.includes("type"); // JSON key format
  case AIProviderName.BEDROCK:
- return apiKey.length >= 20; // AWS access key length
+ return apiKey.length >= API_KEY_LENGTHS.AWS_ACCESS_KEY; // AWS access key length
  case AIProviderName.AZURE:
- return apiKey.length >= 32; // Azure OpenAI API key length
+ return apiKey.length >= API_KEY_LENGTHS.AZURE_MIN; // Azure OpenAI API key length
  case AIProviderName.OLLAMA:
  return true; // Ollama usually doesn't require specific format
  default:
@@ -604,14 +607,14 @@ export class ProviderHealthChecker {
  static checkBedrockModels(healthStatus) {
  const bedrockModel = process.env.BEDROCK_MODEL || process.env.BEDROCK_MODEL_ID;
  const supportedModels = [
- "anthropic.claude-3-sonnet-20240229-v1:0",
- "anthropic.claude-3-haiku-20240307-v1:0",
- "anthropic.claude-3-opus-20240229-v1:0",
+ BedrockModels.CLAUDE_3_SONNET,
+ BedrockModels.CLAUDE_3_HAIKU,
+ BedrockModels.CLAUDE_3_5_SONNET,
  "anthropic.claude-v2:1",
  "amazon.titan-text-express-v1",
  ];
  if (!bedrockModel) {
- healthStatus.recommendations.push("Set BEDROCK_MODEL or BEDROCK_MODEL_ID for faster startup (e.g., anthropic.claude-3-sonnet-20240229-v1:0)");
+ healthStatus.recommendations.push(`Set BEDROCK_MODEL or BEDROCK_MODEL_ID for faster startup (e.g., ${BedrockModels.CLAUDE_3_SONNET})`);
  }
  else if (!supportedModels.includes(bedrockModel)) {
  healthStatus.recommendations.push(`Consider using a popular Bedrock model: ${supportedModels.slice(0, 3).join(", ")}`);
@@ -658,39 +661,44 @@ export class ProviderHealthChecker {
  switch (providerName) {
  case AIProviderName.ANTHROPIC:
  return [
- "claude-3-5-sonnet-20241022",
- "claude-3-haiku-20240307",
- "claude-3-opus-20240229",
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ AnthropicModels.CLAUDE_3_HAIKU,
+ AnthropicModels.CLAUDE_3_OPUS,
  ];
  case AIProviderName.OPENAI:
- return ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"];
+ return [
+ OpenAIModels.GPT_4O,
+ OpenAIModels.GPT_4O_MINI,
+ OpenAIModels.GPT_3_5_TURBO,
+ ];
  case AIProviderName.GOOGLE_AI:
- return ["gemini-1.5-pro", "gemini-1.5-flash", "gemini-pro"];
+ return [
+ GoogleAIModels.GEMINI_1_5_PRO,
+ GoogleAIModels.GEMINI_1_5_FLASH,
+ GoogleAIModels.GEMINI_2_5_PRO,
+ ];
  case AIProviderName.VERTEX:
  return [
  // Google models (via vertex provider)
- "gemini-2.5-pro",
- "gemini-2.5-flash",
- "gemini-2.5-flash-lite",
- "gemini-2.0-flash-001",
- "gemini-1.5-pro",
- "gemini-1.5-flash",
+ GoogleAIModels.GEMINI_2_5_PRO,
+ GoogleAIModels.GEMINI_2_5_FLASH,
+ GoogleAIModels.GEMINI_2_5_FLASH_LITE,
+ GoogleAIModels.GEMINI_2_0_FLASH_001,
+ GoogleAIModels.GEMINI_1_5_PRO,
+ GoogleAIModels.GEMINI_1_5_FLASH,
  // Anthropic models (via vertexAnthropic provider)
  "claude-sonnet-4@20250514",
  "claude-opus-4@20250514",
- "claude-3-5-sonnet-20241022",
- "claude-3-5-haiku-20241022",
- "claude-3-sonnet-20240229",
- "claude-3-haiku-20240307",
- "claude-3-opus-20240229",
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ AnthropicModels.CLAUDE_3_5_HAIKU,
+ AnthropicModels.CLAUDE_3_SONNET,
+ AnthropicModels.CLAUDE_3_HAIKU,
+ AnthropicModels.CLAUDE_3_OPUS,
  ];
  case AIProviderName.BEDROCK:
- return [
- "anthropic.claude-3-sonnet-20240229-v1:0",
- "anthropic.claude-3-haiku-20240307-v1:0",
- ];
+ return [BedrockModels.CLAUDE_3_SONNET, BedrockModels.CLAUDE_3_HAIKU];
  case AIProviderName.AZURE:
- return ["gpt-4o", "gpt-4o-mini", "gpt-35-turbo"];
+ return [OpenAIModels.GPT_4O, OpenAIModels.GPT_4O_MINI, "gpt-35-turbo"];
  case AIProviderName.OLLAMA:
  return ["llama3.2:latest", "llama3.1:latest", "mistral:latest"];
  default:
@@ -951,8 +959,7 @@ export class ProviderHealthChecker {
  if (projectId) {
  result.projectId = projectId;
  // Validate project ID format
- const projectIdPattern = /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/;
- if (projectIdPattern.test(projectId)) {
+ if (PROJECT_ID_FORMAT.PATTERN.test(projectId)) {
  result.isValid = true;
  }
  else {
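
Note: the net effect of the providerHealth.js changes is that key-format checks use the shared length constants instead of loose `length > 20` heuristics. A small usage sketch of the static checker shown above, with import paths assumed from the dist layout in this diff:

// Sketch: calling the stricter validateApiKeyFormat from the hunk above.
import { ProviderHealthChecker } from "./providerHealth.js";
import { AIProviderName } from "../core/types.js";

const key = process.env.OPENAI_API_KEY ?? "";
// Previously any "sk-" key longer than 20 characters passed; now the key must
// also satisfy API_KEY_LENGTHS.OPENAI_MIN (48 characters).
const formatOk = ProviderHealthChecker.validateApiKeyFormat(AIProviderName.OPENAI, key);
console.log(`OPENAI key format ${formatOk ? "looks valid" : "looks invalid"}`);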

package/dist/utils/providerSetupMessages.js CHANGED
@@ -2,6 +2,7 @@
  * Enhanced Provider Setup Messages
  * Provides detailed setup instructions for AI providers
  */
+ import { OpenAIModels, GoogleAIModels, AnthropicModels, APIVersions, } from "../core/types.js";
  /**
  * Generate enhanced error message with setup instructions
  */
@@ -12,7 +13,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  envVars: [
  'OPENAI_API_KEY="sk-proj-your-openai-api-key"',
  "# Optional:",
- 'OPENAI_MODEL="gpt-4o"',
+ `OPENAI_MODEL="${OpenAIModels.GPT_4O}"`,
  'OPENAI_BASE_URL="https://api.openai.com"',
  ],
  },
@@ -21,7 +22,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  envVars: [
  'ANTHROPIC_API_KEY="sk-ant-api03-your-anthropic-key"',
  "# Optional:",
- 'ANTHROPIC_MODEL="claude-3-5-sonnet-20241022"',
+ `ANTHROPIC_MODEL="${AnthropicModels.CLAUDE_3_5_SONNET}"`,
  ],
  },
  "google-ai": {
@@ -29,7 +30,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  envVars: [
  'GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"',
  "# Optional:",
- 'GOOGLE_AI_MODEL="gemini-2.5-pro"',
+ `GOOGLE_AI_MODEL="${GoogleAIModels.GEMINI_2_5_PRO}"`,
  ],
  },
  vertex: {
@@ -39,7 +40,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  'GOOGLE_VERTEX_PROJECT="your-gcp-project-id"',
  'GOOGLE_VERTEX_LOCATION="us-central1"',
  "# Optional:",
- 'VERTEX_MODEL="gemini-2.5-pro"',
+ `VERTEX_MODEL="${GoogleAIModels.GEMINI_2_5_PRO}"`,
  ],
  },
  bedrock: {
@@ -61,8 +62,8 @@ export function getProviderSetupMessage(provider, missingVars) {
  'AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"',
  'AZURE_OPENAI_DEPLOYMENT_ID="your-deployment-name"',
  "# Optional:",
- 'AZURE_MODEL="gpt-4o"',
- 'AZURE_API_VERSION="2024-02-15-preview"',
+ `AZURE_MODEL="${OpenAIModels.GPT_4O}"`,
+ `AZURE_API_VERSION="${APIVersions.AZURE_STABLE}"`,
  ],
  },
  huggingface: {