@juspay/neurolink 7.33.2 → 7.33.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/cli/commands/config.d.ts +3 -4
  3. package/dist/cli/commands/config.js +2 -3
  4. package/dist/constants/index.d.ts +192 -0
  5. package/dist/constants/index.js +195 -0
  6. package/dist/constants/performance.d.ts +366 -0
  7. package/dist/constants/performance.js +389 -0
  8. package/dist/constants/retry.d.ts +224 -0
  9. package/dist/constants/retry.js +266 -0
  10. package/dist/constants/timeouts.d.ts +225 -0
  11. package/dist/constants/timeouts.js +182 -0
  12. package/dist/constants/tokens.d.ts +234 -0
  13. package/dist/constants/tokens.js +314 -0
  14. package/dist/core/baseProvider.js +26 -1
  15. package/dist/core/constants.d.ts +12 -3
  16. package/dist/core/constants.js +22 -6
  17. package/dist/core/factory.js +19 -0
  18. package/dist/core/types.d.ts +268 -0
  19. package/dist/core/types.js +153 -0
  20. package/dist/factories/providerRegistry.js +2 -0
  21. package/dist/lib/constants/index.d.ts +192 -0
  22. package/dist/lib/constants/index.js +195 -0
  23. package/dist/lib/constants/performance.d.ts +366 -0
  24. package/dist/lib/constants/performance.js +389 -0
  25. package/dist/lib/constants/retry.d.ts +224 -0
  26. package/dist/lib/constants/retry.js +266 -0
  27. package/dist/lib/constants/timeouts.d.ts +225 -0
  28. package/dist/lib/constants/timeouts.js +182 -0
  29. package/dist/lib/constants/tokens.d.ts +234 -0
  30. package/dist/lib/constants/tokens.js +314 -0
  31. package/dist/lib/core/baseProvider.js +26 -1
  32. package/dist/lib/core/constants.d.ts +12 -3
  33. package/dist/lib/core/constants.js +22 -6
  34. package/dist/lib/core/factory.js +19 -0
  35. package/dist/lib/core/types.d.ts +268 -0
  36. package/dist/lib/core/types.js +153 -0
  37. package/dist/lib/factories/providerRegistry.js +2 -0
  38. package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  39. package/dist/lib/models/modelRegistry.d.ts +1 -1
  40. package/dist/lib/models/modelRegistry.js +63 -37
  41. package/dist/lib/neurolink.js +35 -34
  42. package/dist/lib/providers/amazonBedrock.js +2 -2
  43. package/dist/lib/providers/anthropic.js +3 -12
  44. package/dist/lib/providers/anthropicBaseProvider.js +1 -2
  45. package/dist/lib/providers/azureOpenai.d.ts +1 -1
  46. package/dist/lib/providers/azureOpenai.js +51 -9
  47. package/dist/lib/providers/googleAiStudio.js +3 -3
  48. package/dist/lib/providers/googleVertex.js +2 -2
  49. package/dist/lib/providers/huggingFace.js +1 -2
  50. package/dist/lib/providers/litellm.js +1 -2
  51. package/dist/lib/providers/mistral.js +2 -2
  52. package/dist/lib/providers/ollama.js +7 -8
  53. package/dist/lib/providers/openAI.js +2 -2
  54. package/dist/lib/providers/openaiCompatible.js +5 -2
  55. package/dist/lib/providers/sagemaker/language-model.d.ts +5 -0
  56. package/dist/lib/providers/sagemaker/language-model.js +9 -1
  57. package/dist/lib/utils/providerConfig.d.ts +25 -0
  58. package/dist/lib/utils/providerConfig.js +24 -3
  59. package/dist/lib/utils/providerHealth.d.ts +1 -1
  60. package/dist/lib/utils/providerHealth.js +47 -36
  61. package/dist/lib/utils/providerSetupMessages.js +7 -6
  62. package/dist/lib/utils/providerUtils.js +16 -24
  63. package/dist/lib/utils/tokenLimits.d.ts +2 -2
  64. package/dist/lib/utils/tokenLimits.js +10 -3
  65. package/dist/mcp/servers/aiProviders/aiWorkflowTools.js +2 -2
  66. package/dist/models/modelRegistry.d.ts +1 -1
  67. package/dist/models/modelRegistry.js +63 -37
  68. package/dist/neurolink.js +35 -34
  69. package/dist/providers/amazonBedrock.js +2 -2
  70. package/dist/providers/anthropic.js +3 -12
  71. package/dist/providers/anthropicBaseProvider.js +1 -2
  72. package/dist/providers/azureOpenai.d.ts +1 -1
  73. package/dist/providers/azureOpenai.js +51 -9
  74. package/dist/providers/googleAiStudio.js +3 -3
  75. package/dist/providers/googleVertex.js +2 -2
  76. package/dist/providers/huggingFace.js +1 -2
  77. package/dist/providers/litellm.js +1 -2
  78. package/dist/providers/mistral.js +2 -2
  79. package/dist/providers/ollama.js +7 -8
  80. package/dist/providers/openAI.js +2 -2
  81. package/dist/providers/openaiCompatible.js +5 -2
  82. package/dist/providers/sagemaker/language-model.d.ts +5 -0
  83. package/dist/providers/sagemaker/language-model.js +9 -1
  84. package/dist/utils/providerConfig.d.ts +25 -0
  85. package/dist/utils/providerConfig.js +24 -3
  86. package/dist/utils/providerHealth.d.ts +1 -1
  87. package/dist/utils/providerHealth.js +47 -36
  88. package/dist/utils/providerSetupMessages.js +7 -6
  89. package/dist/utils/providerUtils.js +16 -24
  90. package/dist/utils/tokenLimits.d.ts +2 -2
  91. package/dist/utils/tokenLimits.js +10 -3
  92. package/package.json +1 -1
@@ -3,14 +3,14 @@
3
3
  * Provides centralized model data for models command system
4
4
  * Part of Phase 4.1 - Models Command System
5
5
  */
6
- import { AIProviderName } from "../types/index.js";
6
+ import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, DEFAULT_MODEL_ALIASES, } from "../core/types.js";
7
7
  /**
8
8
  * Comprehensive model registry
9
9
  */
10
10
  export const MODEL_REGISTRY = {
11
11
  // OpenAI Models
12
- "gpt-4o": {
13
- id: "gpt-4o",
12
+ [OpenAIModels.GPT_4O]: {
13
+ id: OpenAIModels.GPT_4O,
14
14
  name: "GPT-4 Omni",
15
15
  provider: AIProviderName.OPENAI,
16
16
  description: "Most capable OpenAI model with vision and advanced reasoning",
@@ -53,8 +53,8 @@ export const MODEL_REGISTRY = {
53
53
  releaseDate: "2024-05-13",
54
54
  category: "general",
55
55
  },
56
- "gpt-4o-mini": {
57
- id: "gpt-4o-mini",
56
+ [OpenAIModels.GPT_4O_MINI]: {
57
+ id: OpenAIModels.GPT_4O_MINI,
58
58
  name: "GPT-4 Omni Mini",
59
59
  provider: AIProviderName.OPENAI,
60
60
  description: "Fast and cost-effective model with strong performance",
@@ -98,8 +98,8 @@ export const MODEL_REGISTRY = {
98
98
  category: "general",
99
99
  },
100
100
  // Google AI Studio Models
101
- "gemini-2.5-pro": {
102
- id: "gemini-2.5-pro",
101
+ [GoogleAIModels.GEMINI_2_5_PRO]: {
102
+ id: GoogleAIModels.GEMINI_2_5_PRO,
103
103
  name: "Gemini 2.5 Pro",
104
104
  provider: AIProviderName.GOOGLE_AI,
105
105
  description: "Google's most capable multimodal model with large context window",
@@ -142,8 +142,8 @@ export const MODEL_REGISTRY = {
142
142
  releaseDate: "2024-12-11",
143
143
  category: "reasoning",
144
144
  },
145
- "gemini-2.5-flash": {
146
- id: "gemini-2.5-flash",
145
+ [GoogleAIModels.GEMINI_2_5_FLASH]: {
146
+ id: GoogleAIModels.GEMINI_2_5_FLASH,
147
147
  name: "Gemini 2.5 Flash",
148
148
  provider: AIProviderName.GOOGLE_AI,
149
149
  description: "Fast and efficient multimodal model with large context",
@@ -187,8 +187,8 @@ export const MODEL_REGISTRY = {
187
187
  category: "general",
188
188
  },
189
189
  // Anthropic Models
190
- "claude-3-5-sonnet-20241022": {
191
- id: "claude-3-5-sonnet-20241022",
190
+ [AnthropicModels.CLAUDE_3_5_SONNET]: {
191
+ id: AnthropicModels.CLAUDE_3_5_SONNET,
192
192
  name: "Claude 3.5 Sonnet",
193
193
  provider: AIProviderName.ANTHROPIC,
194
194
  description: "Anthropic's most capable model with excellent reasoning and coding",
@@ -236,8 +236,8 @@ export const MODEL_REGISTRY = {
236
236
  releaseDate: "2024-10-22",
237
237
  category: "coding",
238
238
  },
239
- "claude-3-5-haiku-20241022": {
240
- id: "claude-3-5-haiku-20241022",
239
+ [AnthropicModels.CLAUDE_3_5_HAIKU]: {
240
+ id: AnthropicModels.CLAUDE_3_5_HAIKU,
241
241
  name: "Claude 3.5 Haiku",
242
242
  provider: AIProviderName.ANTHROPIC,
243
243
  description: "Fast and efficient Claude model for quick tasks",
@@ -380,39 +380,65 @@ Object.values(MODEL_REGISTRY).forEach((model) => {
380
380
  MODEL_ALIASES[alias.toLowerCase()] = model.id;
381
381
  });
382
382
  });
383
- // Add common aliases
384
- Object.assign(MODEL_ALIASES, {
385
- latest: "gpt-4o", // Default latest model
386
- fastest: "gpt-4o-mini",
387
- cheapest: "gemini-2.5-flash",
388
- "best-coding": "claude-3-5-sonnet-20241022",
389
- "best-analysis": "gemini-2.5-pro",
390
- "best-creative": "claude-3-5-sonnet-20241022",
391
- "best-value": "gemini-2.5-flash",
392
- local: "llama3.2:latest",
383
+ // Pull canonical alias recommendations from core/types
384
+ Object.entries(DEFAULT_MODEL_ALIASES).forEach(([k, v]) => {
385
+ MODEL_ALIASES[k.toLowerCase().replace(/_/g, "-")] = v;
393
386
  });
387
+ MODEL_ALIASES.local = "llama3.2:latest";
394
388
  /**
395
389
  * Use case to model mappings
396
390
  */
397
391
  export const USE_CASE_RECOMMENDATIONS = {
398
- coding: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
399
- creative: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
400
- analysis: ["gemini-2.5-pro", "claude-3-5-sonnet-20241022", "gpt-4o"],
392
+ coding: [
393
+ AnthropicModels.CLAUDE_3_5_SONNET,
394
+ OpenAIModels.GPT_4O,
395
+ GoogleAIModels.GEMINI_2_5_PRO,
396
+ ],
397
+ creative: [
398
+ AnthropicModels.CLAUDE_3_5_SONNET,
399
+ OpenAIModels.GPT_4O,
400
+ GoogleAIModels.GEMINI_2_5_PRO,
401
+ ],
402
+ analysis: [
403
+ GoogleAIModels.GEMINI_2_5_PRO,
404
+ AnthropicModels.CLAUDE_3_5_SONNET,
405
+ OpenAIModels.GPT_4O,
406
+ ],
401
407
  conversation: [
402
- "gpt-4o",
403
- "claude-3-5-sonnet-20241022",
404
- "claude-3-5-haiku-20241022",
408
+ OpenAIModels.GPT_4O,
409
+ AnthropicModels.CLAUDE_3_5_SONNET,
410
+ AnthropicModels.CLAUDE_3_5_HAIKU,
411
+ ],
412
+ reasoning: [
413
+ AnthropicModels.CLAUDE_3_5_SONNET,
414
+ GoogleAIModels.GEMINI_2_5_PRO,
415
+ OpenAIModels.GPT_4O,
416
+ ],
417
+ translation: [
418
+ GoogleAIModels.GEMINI_2_5_PRO,
419
+ OpenAIModels.GPT_4O,
420
+ AnthropicModels.CLAUDE_3_5_HAIKU,
405
421
  ],
406
- reasoning: ["claude-3-5-sonnet-20241022", "gemini-2.5-pro", "gpt-4o"],
407
- translation: ["gemini-2.5-pro", "gpt-4o", "claude-3-5-haiku-20241022"],
408
422
  summarization: [
409
- "gemini-2.5-flash",
410
- "gpt-4o-mini",
411
- "claude-3-5-haiku-20241022",
423
+ GoogleAIModels.GEMINI_2_5_FLASH,
424
+ OpenAIModels.GPT_4O_MINI,
425
+ AnthropicModels.CLAUDE_3_5_HAIKU,
426
+ ],
427
+ "cost-effective": [
428
+ GoogleAIModels.GEMINI_2_5_FLASH,
429
+ OpenAIModels.GPT_4O_MINI,
430
+ "mistral-small-latest",
431
+ ],
432
+ "high-quality": [
433
+ AnthropicModels.CLAUDE_3_5_SONNET,
434
+ OpenAIModels.GPT_4O,
435
+ GoogleAIModels.GEMINI_2_5_PRO,
436
+ ],
437
+ fast: [
438
+ OpenAIModels.GPT_4O_MINI,
439
+ GoogleAIModels.GEMINI_2_5_FLASH,
440
+ AnthropicModels.CLAUDE_3_5_HAIKU,
412
441
  ],
413
- "cost-effective": ["gemini-2.5-flash", "gpt-4o-mini", "mistral-small-latest"],
414
- "high-quality": ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
415
- fast: ["gpt-4o-mini", "gemini-2.5-flash", "claude-3-5-haiku-20241022"],
416
442
  };
417
443
  /**
418
444
  * Get all models
package/dist/neurolink.js CHANGED
@@ -16,6 +16,7 @@ catch {
16
16
  import { AIProviderFactory } from "./core/factory.js";
17
17
  import { mcpLogger } from "./utils/logger.js";
18
18
  import { SYSTEM_LIMITS } from "./core/constants.js";
19
+ import { NANOSECOND_TO_MS_DIVISOR, MCP_TIMEOUTS, SERVER_CONFIG, TOOL_TIMEOUTS, RETRY_ATTEMPTS, RETRY_DELAYS, CIRCUIT_BREAKER, CIRCUIT_BREAKER_RESET_MS, MEMORY_THRESHOLDS, PROVIDER_TIMEOUTS, PERFORMANCE_THRESHOLDS, } from "./constants/index.js";
19
20
  import pLimit from "p-limit";
20
21
  import { toolRegistry } from "./mcp/toolRegistry.js";
21
22
  import { logger } from "./utils/logger.js";
@@ -164,7 +165,7 @@ export class NeuroLink {
164
165
  elapsedMs: Date.now() - constructorStartTime,
165
166
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
166
167
  registrySetupDurationNs: registrySetupDurationNs.toString(),
167
- registrySetupDurationMs: Number(registrySetupDurationNs) / 1000000,
168
+ registrySetupDurationMs: Number(registrySetupDurationNs) / NANOSECOND_TO_MS_DIVISOR,
168
169
  enableManualMCP: false,
169
170
  message: "ProviderRegistry configured successfully with security settings",
170
171
  });
@@ -179,7 +180,7 @@ export class NeuroLink {
179
180
  elapsedMs: Date.now() - constructorStartTime,
180
181
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
181
182
  registrySetupDurationNs: registrySetupDurationNs.toString(),
182
- registrySetupDurationMs: Number(registrySetupDurationNs) / 1000000,
183
+ registrySetupDurationMs: Number(registrySetupDurationNs) / NANOSECOND_TO_MS_DIVISOR,
183
184
  error: error instanceof Error ? error.message : String(error),
184
185
  errorName: error instanceof Error ? error.name : "UnknownError",
185
186
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -224,9 +225,9 @@ export class NeuroLink {
224
225
  elapsedMs: Date.now() - constructorStartTime,
225
226
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
226
227
  memoryInitDurationNs: memoryInitDurationNs.toString(),
227
- memoryInitDurationMs: Number(memoryInitDurationNs) / 1000000,
228
+ memoryInitDurationMs: Number(memoryInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
228
229
  memoryManagerCreateDurationNs: memoryManagerCreateDurationNs.toString(),
229
- memoryManagerCreateDurationMs: Number(memoryManagerCreateDurationNs) / 1000000,
230
+ memoryManagerCreateDurationMs: Number(memoryManagerCreateDurationNs) / NANOSECOND_TO_MS_DIVISOR,
230
231
  finalMemoryConfig: {
231
232
  maxSessions: memoryConfig.maxSessions,
232
233
  maxTurnsPerSession: memoryConfig.maxTurnsPerSession,
@@ -245,7 +246,7 @@ export class NeuroLink {
245
246
  elapsedMs: Date.now() - constructorStartTime,
246
247
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
247
248
  memoryInitDurationNs: memoryInitDurationNs.toString(),
248
- memoryInitDurationMs: Number(memoryInitDurationNs) / 1000000,
249
+ memoryInitDurationMs: Number(memoryInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
249
250
  error: error instanceof Error ? error.message : String(error),
250
251
  errorName: error instanceof Error ? error.name : "UnknownError",
251
252
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -289,8 +290,8 @@ export class NeuroLink {
289
290
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
290
291
  externalServerInitStartTimeNs: externalServerInitStartTime.toString(),
291
292
  serverManagerConfig: {
292
- maxServers: 20,
293
- defaultTimeout: 15000,
293
+ maxServers: SERVER_CONFIG.MAX_MCP_SERVERS,
294
+ defaultTimeout: MCP_TIMEOUTS.EXTERNAL_SERVER_STARTUP_MS,
294
295
  enableAutoRestart: true,
295
296
  enablePerformanceMonitoring: true,
296
297
  },
@@ -317,7 +318,7 @@ export class NeuroLink {
317
318
  elapsedMs: Date.now() - constructorStartTime,
318
319
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
319
320
  externalServerInitDurationNs: externalServerInitDurationNs.toString(),
320
- externalServerInitDurationMs: Number(externalServerInitDurationNs) / 1000000,
321
+ externalServerInitDurationMs: Number(externalServerInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
321
322
  hasExternalServerManager: !!this.externalServerManager,
322
323
  message: "External server manager initialized successfully",
323
324
  });
@@ -333,7 +334,7 @@ export class NeuroLink {
333
334
  elapsedMs: Date.now() - constructorStartTime,
334
335
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
335
336
  externalServerInitDurationNs: externalServerInitDurationNs.toString(),
336
- externalServerInitDurationMs: Number(externalServerInitDurationNs) / 1000000,
337
+ externalServerInitDurationMs: Number(externalServerInitDurationNs) / NANOSECOND_TO_MS_DIVISOR,
337
338
  error: error instanceof Error ? error.message : String(error),
338
339
  errorName: error instanceof Error ? error.name : "UnknownError",
339
340
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -418,7 +419,7 @@ export class NeuroLink {
418
419
  elapsedMs: Date.now() - constructorStartTime,
419
420
  elapsedNs: (process.hrtime.bigint() - constructorHrTimeStart).toString(),
420
421
  eventHandlerSetupDurationNs: eventHandlerSetupDurationNs.toString(),
421
- eventHandlerSetupDurationMs: Number(eventHandlerSetupDurationNs) / 1000000,
422
+ eventHandlerSetupDurationMs: Number(eventHandlerSetupDurationNs) / NANOSECOND_TO_MS_DIVISOR,
422
423
  eventHandlersCount: 5,
423
424
  eventHandlerTypes: [
424
425
  "connected",
@@ -441,7 +442,7 @@ export class NeuroLink {
441
442
  constructorId,
442
443
  timestamp: new Date().toISOString(),
443
444
  constructorDurationNs: constructorDurationNs.toString(),
444
- constructorDurationMs: Number(constructorDurationNs) / 1000000,
445
+ constructorDurationMs: Number(constructorDurationNs) / NANOSECOND_TO_MS_DIVISOR,
445
446
  totalElapsedMs: Date.now() - constructorStartTime,
446
447
  finalState: {
447
448
  hasConversationMemory: !!this.conversationMemory,
@@ -542,7 +543,7 @@ export class NeuroLink {
542
543
  elapsedMs: Date.now() - mcpInitStartTime,
543
544
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
544
545
  performanceImportDurationNs: performanceImportDurationNs.toString(),
545
- performanceImportDurationMs: Number(performanceImportDurationNs) / 1000000,
546
+ performanceImportDurationMs: Number(performanceImportDurationNs) / NANOSECOND_TO_MS_DIVISOR,
546
547
  hasMemoryManager: !!MemoryManager,
547
548
  message: "MemoryManager imported successfully",
548
549
  });
@@ -558,7 +559,7 @@ export class NeuroLink {
558
559
  elapsedMs: Date.now() - mcpInitStartTime,
559
560
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
560
561
  performanceImportDurationNs: performanceImportDurationNs.toString(),
561
- performanceImportDurationMs: Number(performanceImportDurationNs) / 1000000,
562
+ performanceImportDurationMs: Number(performanceImportDurationNs) / NANOSECOND_TO_MS_DIVISOR,
562
563
  error: error instanceof Error ? error.message : String(error),
563
564
  errorName: error instanceof Error ? error.name : "UnknownError",
564
565
  message: "MemoryManager import failed - continuing without performance tracking",
@@ -590,7 +591,7 @@ export class NeuroLink {
590
591
  */
591
592
  async initializeToolRegistryInternal(mcpInitId, mcpInitStartTime, mcpInitHrTimeStart) {
592
593
  const toolRegistryStartTime = process.hrtime.bigint();
593
- const initTimeout = 3000;
594
+ const initTimeout = MCP_TIMEOUTS.INITIALIZATION_MS;
594
595
  logger.debug(`[NeuroLink] ⏱️ LOG_POINT_M007_TOOL_REGISTRY_TIMEOUT_SETUP`, {
595
596
  logPoint: "M007_TOOL_REGISTRY_TIMEOUT_SETUP",
596
597
  mcpInitId,
@@ -616,7 +617,7 @@ export class NeuroLink {
616
617
  elapsedMs: Date.now() - mcpInitStartTime,
617
618
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
618
619
  toolRegistryDurationNs: toolRegistryDurationNs.toString(),
619
- toolRegistryDurationMs: Number(toolRegistryDurationNs) / 1000000,
620
+ toolRegistryDurationMs: Number(toolRegistryDurationNs) / NANOSECOND_TO_MS_DIVISOR,
620
621
  message: "Tool registry initialization completed within timeout",
621
622
  });
622
623
  }
@@ -644,7 +645,7 @@ export class NeuroLink {
644
645
  elapsedMs: Date.now() - mcpInitStartTime,
645
646
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
646
647
  providerRegistryDurationNs: providerRegistryDurationNs.toString(),
647
- providerRegistryDurationMs: Number(providerRegistryDurationNs) / 1000000,
648
+ providerRegistryDurationMs: Number(providerRegistryDurationNs) / NANOSECOND_TO_MS_DIVISOR,
648
649
  message: "Provider registry registration completed successfully",
649
650
  });
650
651
  }
@@ -674,7 +675,7 @@ export class NeuroLink {
674
675
  elapsedMs: Date.now() - mcpInitStartTime,
675
676
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
676
677
  directToolsDurationNs: directToolsDurationNs.toString(),
677
- directToolsDurationMs: Number(directToolsDurationNs) / 1000000,
678
+ directToolsDurationMs: Number(directToolsDurationNs) / NANOSECOND_TO_MS_DIVISOR,
678
679
  serverId: "neurolink-direct",
679
680
  message: "Direct tools server registered successfully",
680
681
  });
@@ -692,7 +693,7 @@ export class NeuroLink {
692
693
  elapsedMs: Date.now() - mcpInitStartTime,
693
694
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
694
695
  directToolsDurationNs: directToolsDurationNs.toString(),
695
- directToolsDurationMs: Number(directToolsDurationNs) / 1000000,
696
+ directToolsDurationMs: Number(directToolsDurationNs) / NANOSECOND_TO_MS_DIVISOR,
696
697
  error: error instanceof Error ? error.message : String(error),
697
698
  errorName: error instanceof Error ? error.name : "UnknownError",
698
699
  errorStack: error instanceof Error ? error.stack : undefined,
@@ -731,7 +732,7 @@ export class NeuroLink {
731
732
  elapsedMs: Date.now() - mcpInitStartTime,
732
733
  elapsedNs: (process.hrtime.bigint() - mcpInitHrTimeStart).toString(),
733
734
  mcpConfigDurationNs: mcpConfigDurationNs.toString(),
734
- mcpConfigDurationMs: Number(mcpConfigDurationNs) / 1000000,
735
+ mcpConfigDurationMs: Number(mcpConfigDurationNs) / NANOSECOND_TO_MS_DIVISOR,
735
736
  serversLoaded: configResult.serversLoaded,
736
737
  errorsCount: configResult.errors.length,
737
738
  configResult: {
@@ -774,7 +775,7 @@ export class NeuroLink {
774
775
  initTime: `${initTime}ms`,
775
776
  memoryUsed: `${memoryDelta}MB`,
776
777
  });
777
- if (memoryDelta > 30) {
778
+ if (memoryDelta > MEMORY_THRESHOLDS.MODERATE_USAGE_MB) {
778
779
  mcpLogger.debug("💡 Memory cleanup suggestion: MCP initialization used significant memory. Consider calling MemoryManager.forceGC() after heavy operations.");
779
780
  }
780
781
  }
@@ -1120,7 +1121,7 @@ export class NeuroLink {
1120
1121
  elapsedMs: Date.now() - generateInternalStartTime,
1121
1122
  elapsedNs: (process.hrtime.bigint() - generateInternalHrTimeStart).toString(),
1122
1123
  conversationMemoryDurationNs: conversationMemoryDurationNs.toString(),
1123
- conversationMemoryDurationMs: Number(conversationMemoryDurationNs) / 1000000,
1124
+ conversationMemoryDurationMs: Number(conversationMemoryDurationNs) / NANOSECOND_TO_MS_DIVISOR,
1124
1125
  message: "Conversation memory initialization completed successfully",
1125
1126
  });
1126
1127
  }
@@ -1165,7 +1166,7 @@ export class NeuroLink {
1165
1166
  * Perform MCP generation with retry logic
1166
1167
  */
1167
1168
  async performMCPGenerationRetries(options, generateInternalId, generateInternalStartTime, generateInternalHrTimeStart, functionTag) {
1168
- const maxMcpRetries = 2;
1169
+ const maxMcpRetries = RETRY_ATTEMPTS.QUICK;
1169
1170
  const mcpRetryLoopStartTime = process.hrtime.bigint();
1170
1171
  logger.debug(`[NeuroLink] 🔄 LOG_POINT_G006_MCP_RETRY_LOOP_START`, {
1171
1172
  logPoint: "G006_MCP_RETRY_LOOP_START",
@@ -1207,7 +1208,7 @@ export class NeuroLink {
1207
1208
  elapsedMs: Date.now() - generateInternalStartTime,
1208
1209
  elapsedNs: (process.hrtime.bigint() - generateInternalHrTimeStart).toString(),
1209
1210
  mcpAttemptDurationNs: mcpAttemptDurationNs.toString(),
1210
- mcpAttemptDurationMs: Number(mcpAttemptDurationNs) / 1000000,
1211
+ mcpAttemptDurationMs: Number(mcpAttemptDurationNs) / NANOSECOND_TO_MS_DIVISOR,
1211
1212
  currentAttempt: attempt,
1212
1213
  resultAnalysis: {
1213
1214
  hasResult: !!mcpResult,
@@ -1326,7 +1327,7 @@ export class NeuroLink {
1326
1327
  elapsedMs: Date.now() - tryMCPStartTime,
1327
1328
  elapsedNs: (process.hrtime.bigint() - tryMCPHrTimeStart).toString(),
1328
1329
  mcpInitCheckDurationNs: mcpInitCheckDurationNs.toString(),
1329
- mcpInitCheckDurationMs: Number(mcpInitCheckDurationNs) / 1000000,
1330
+ mcpInitCheckDurationMs: Number(mcpInitCheckDurationNs) / NANOSECOND_TO_MS_DIVISOR,
1330
1331
  mcpInitializedAfter: this.mcpInitialized,
1331
1332
  initializationSuccessful: this.mcpInitialized,
1332
1333
  message: "MCP initialization check completed",
@@ -1782,7 +1783,7 @@ export class NeuroLink {
1782
1783
  elapsedMs: Date.now() - startTime,
1783
1784
  elapsedNs: (process.hrtime.bigint() - hrTimeStart).toString(),
1784
1785
  validationDurationNs: validationDurationNs.toString(),
1785
- validationDurationMs: Number(validationDurationNs) / 1000000,
1786
+ validationDurationMs: Number(validationDurationNs) / NANOSECOND_TO_MS_DIVISOR,
1786
1787
  validationError: "Stream options must include either input.text or input.audio",
1787
1788
  message: "EXHAUSTIVE validation failure analysis with character-level debugging",
1788
1789
  });
@@ -1797,7 +1798,7 @@ export class NeuroLink {
1797
1798
  elapsedMs: Date.now() - startTime,
1798
1799
  elapsedNs: (process.hrtime.bigint() - hrTimeStart).toString(),
1799
1800
  validationDurationNs: validationDurationNs.toString(),
1800
- validationDurationMs: Number(validationDurationNs) / 1000000,
1801
+ validationDurationMs: Number(validationDurationNs) / NANOSECOND_TO_MS_DIVISOR,
1801
1802
  inputTextValid: hasText,
1802
1803
  inputAudioPresent: hasAudio,
1803
1804
  inputTextLength: hasText ? options.input.text.length : 0,
@@ -2315,16 +2316,16 @@ export class NeuroLink {
2315
2316
  this.emitter.emit("tool:start", toolName, params);
2316
2317
  // Set default options
2317
2318
  const finalOptions = {
2318
- timeout: options?.timeout || 30000, // 30 second default timeout
2319
- maxRetries: options?.maxRetries || 2, // Default 2 retries for retriable errors
2320
- retryDelayMs: options?.retryDelayMs || 1000, // 1 second delay between retries
2319
+ timeout: options?.timeout || TOOL_TIMEOUTS.EXECUTION_DEFAULT_MS, // 30 second default timeout
2320
+ maxRetries: options?.maxRetries || RETRY_ATTEMPTS.DEFAULT, // Default 2 retries for retriable errors
2321
+ retryDelayMs: options?.retryDelayMs || RETRY_DELAYS.BASE_MS, // 1 second delay between retries
2321
2322
  };
2322
2323
  // Track memory usage for tool execution
2323
2324
  const { MemoryManager } = await import("./utils/performance.js");
2324
2325
  const startMemory = MemoryManager.getMemoryUsageMB();
2325
2326
  // Get or create circuit breaker for this tool
2326
2327
  if (!this.toolCircuitBreakers.has(toolName)) {
2327
- this.toolCircuitBreakers.set(toolName, new CircuitBreaker(5, 60000)); // 5 failures, 1 minute timeout
2328
+ this.toolCircuitBreakers.set(toolName, new CircuitBreaker(CIRCUIT_BREAKER.FAILURE_THRESHOLD, CIRCUIT_BREAKER_RESET_MS));
2328
2329
  }
2329
2330
  const circuitBreaker = this.toolCircuitBreakers.get(toolName);
2330
2331
  // Initialize metrics for this tool if not exists
@@ -2647,10 +2648,10 @@ export class NeuroLink {
2647
2648
  // Check memory usage after tool enumeration
2648
2649
  const endMemory = MemoryManager.getMemoryUsageMB();
2649
2650
  const memoryDelta = endMemory.heapUsed - startMemory.heapUsed;
2650
- if (memoryDelta > 10) {
2651
+ if (memoryDelta > MEMORY_THRESHOLDS.LOW_USAGE_MB) {
2651
2652
  mcpLogger.debug(`🔍 Tool listing used ${memoryDelta}MB memory (large tool registry detected)`);
2652
2653
  // Optimized collection patterns should reduce memory usage significantly
2653
- if (uniqueTools.length > 100) {
2654
+ if (uniqueTools.length > PERFORMANCE_THRESHOLDS.LARGE_TOOL_COLLECTION) {
2654
2655
  mcpLogger.debug("💡 Tool collection optimized for large sets. Memory usage reduced through efficient object reuse.");
2655
2656
  }
2656
2657
  }
@@ -2722,7 +2723,7 @@ export class NeuroLink {
2722
2723
  try {
2723
2724
  const response = await fetch("http://localhost:11434/api/tags", {
2724
2725
  method: "GET",
2725
- signal: AbortSignal.timeout(2000),
2726
+ signal: AbortSignal.timeout(PROVIDER_TIMEOUTS.AUTH_MS),
2726
2727
  });
2727
2728
  if (!response.ok) {
2728
2729
  throw new Error("Ollama service not responding");
@@ -3099,7 +3100,7 @@ export class NeuroLink {
3099
3100
  resetToolCircuitBreaker(toolName) {
3100
3101
  if (this.toolCircuitBreakers.has(toolName)) {
3101
3102
  // Create a new circuit breaker (effectively resets it)
3102
- this.toolCircuitBreakers.set(toolName, new CircuitBreaker(5, 60000));
3103
+ this.toolCircuitBreakers.set(toolName, new CircuitBreaker(CIRCUIT_BREAKER.FAILURE_THRESHOLD, CIRCUIT_BREAKER_RESET_MS));
3103
3104
  mcpLogger.info(`Circuit breaker reset for tool: ${toolName}`);
3104
3105
  }
3105
3106
  }
@@ -157,7 +157,7 @@ export class AmazonBedrockProvider extends BaseProvider {
157
157
  },
158
158
  ],
159
159
  inferenceConfig: {
160
- maxTokens: options.maxTokens || 4096,
160
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
161
161
  temperature: options.temperature || 0.7,
162
162
  },
163
163
  };
@@ -718,7 +718,7 @@ export class AmazonBedrockProvider extends BaseProvider {
718
718
  },
719
719
  ],
720
720
  inferenceConfig: {
721
- maxTokens: options.maxTokens || 4096,
721
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
722
722
  temperature: options.temperature || 0.7,
723
723
  },
724
724
  };
@@ -5,7 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
5
5
  import { logger } from "../utils/logger.js";
6
6
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
7
7
  import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
8
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
8
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
9
9
  import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
10
10
  import { buildMessagesArray } from "../utils/messageBuilder.js";
11
11
  import { createProxyFetch } from "../proxy/proxyFetch.js";
@@ -98,7 +98,7 @@ export class AnthropicProvider extends BaseProvider {
98
98
  model: this.model,
99
99
  messages: messages,
100
100
  temperature: options.temperature,
101
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
101
+ maxTokens: options.maxTokens, // No default limit - unlimited unless specified
102
102
  tools,
103
103
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
104
104
  toolChoice: shouldUseTools ? "auto" : "none",
@@ -111,22 +111,13 @@ export class AnthropicProvider extends BaseProvider {
111
111
  // Full tool support is now available with real streaming
112
112
  const toolCalls = [];
113
113
  const toolResults = [];
114
- const usage = await result.usage;
115
- const finishReason = await result.finishReason;
116
114
  return {
117
115
  stream: transformedStream,
118
116
  provider: this.providerName,
119
117
  model: this.modelName,
120
118
  toolCalls, // ✅ Include tool calls in stream result
121
119
  toolResults, // ✅ Include tool results in stream result
122
- usage: usage
123
- ? {
124
- input: usage.promptTokens || 0,
125
- output: usage.completionTokens || 0,
126
- total: usage.totalTokens || 0,
127
- }
128
- : undefined,
129
- finishReason: finishReason || undefined,
120
+ // Note: omit usage/finishReason to avoid blocking streaming; compute asynchronously if needed.
130
121
  };
131
122
  }
132
123
  catch (error) {
@@ -4,7 +4,6 @@ import { AnthropicModels } from "../types/index.js";
4
4
  import { BaseProvider } from "../core/baseProvider.js";
5
5
  import { logger } from "../utils/logger.js";
6
6
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
7
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
8
7
  import { validateApiKey, createAnthropicBaseConfig, } from "../utils/providerConfig.js";
9
8
  /**
10
9
  * Anthropic provider implementation using BaseProvider pattern
@@ -70,7 +69,7 @@ export class AnthropicProviderV2 extends BaseProvider {
70
69
  prompt: options.input.text,
71
70
  system: options.systemPrompt,
72
71
  temperature: options.temperature,
73
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
72
+ maxTokens: options.maxTokens, // No SDK-side default — the provider's own token limit applies when unspecified
74
73
  tools: options.tools,
75
74
  toolChoice: "auto",
76
75
  abortSignal: timeoutController?.controller.signal,
@@ -1,6 +1,6 @@
1
1
  import { type LanguageModelV1 } from "ai";
2
2
  import { BaseProvider } from "../core/baseProvider.js";
3
- import type { AIProviderName } from "../types/index.js";
3
+ import type { AIProviderName } from "../core/types.js";
4
4
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
5
5
  export declare class AzureOpenAIProvider extends BaseProvider {
6
6
  private apiKey;
@@ -1,10 +1,12 @@
1
1
  import { createAzure } from "@ai-sdk/azure";
2
2
  import { streamText } from "ai";
3
3
  import { BaseProvider } from "../core/baseProvider.js";
4
+ import { APIVersions } from "../core/types.js";
4
5
  import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
5
6
  import { logger } from "../utils/logger.js";
6
7
  import { buildMessagesArray } from "../utils/messageBuilder.js";
7
8
  import { createProxyFetch } from "../proxy/proxyFetch.js";
9
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
8
10
  export class AzureOpenAIProvider extends BaseProvider {
9
11
  apiKey;
10
12
  resourceName;
@@ -18,13 +20,15 @@ export class AzureOpenAIProvider extends BaseProvider {
18
20
  this.resourceName = endpoint
19
21
  .replace("https://", "")
20
22
  .replace(/\/+$/, "") // Remove trailing slashes
21
- .replace(".openai.azure.com", "");
23
+ .replace(".openai.azure.com", "")
24
+ .replace(".cognitiveservices.azure.com", "");
22
25
  this.deployment =
23
26
  modelName ||
27
+ process.env.AZURE_OPENAI_MODEL ||
24
28
  process.env.AZURE_OPENAI_DEPLOYMENT ||
25
29
  process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
26
30
  "gpt-4o";
27
- this.apiVersion = process.env.AZURE_API_VERSION || "2024-10-01-preview";
31
+ this.apiVersion = process.env.AZURE_API_VERSION || APIVersions.AZURE_LATEST;
28
32
  // Configuration validation - now using consolidated utility
29
33
  if (!this.apiKey) {
30
34
  validateApiKey(createAzureAPIKeyConfig());
@@ -33,6 +37,7 @@ export class AzureOpenAIProvider extends BaseProvider {
33
37
  validateApiKey(createAzureEndpointConfig());
34
38
  }
35
39
  // Create the Azure provider instance with proxy support
40
+ // Let the Azure SDK handle all URL construction automatically
36
41
  this.azureProvider = createAzure({
37
42
  resourceName: this.resourceName,
38
43
  apiKey: this.apiKey,
@@ -72,20 +77,57 @@ export class AzureOpenAIProvider extends BaseProvider {
72
77
  // executeGenerate removed - BaseProvider handles all generation with tools
73
78
  async executeStream(options, _analysisSchema) {
74
79
  try {
80
+ // Get ALL available tools (direct + MCP + external from options) - EXACTLY like BaseProvider
81
+ const shouldUseTools = !options.disableTools && this.supportsTools();
82
+ const baseTools = shouldUseTools ? await this.getAllTools() : {};
83
+ const tools = shouldUseTools
84
+ ? {
85
+ ...baseTools,
86
+ ...(options.tools || {}), // Include external tools passed from NeuroLink
87
+ }
88
+ : undefined;
89
+ // DEBUG: Log detailed tool information
90
+ logger.debug("Azure Stream - Tool Loading Debug", {
91
+ shouldUseTools,
92
+ baseToolsProvided: !!baseTools,
93
+ baseToolCount: baseTools ? Object.keys(baseTools).length : 0,
94
+ finalToolCount: tools ? Object.keys(tools).length : 0,
95
+ toolNames: tools ? Object.keys(tools).slice(0, 10) : [],
96
+ disableTools: options.disableTools,
97
+ supportsTools: this.supportsTools(),
98
+ externalToolsCount: options.tools
99
+ ? Object.keys(options.tools).length
100
+ : 0,
101
+ });
102
+ if (tools && Object.keys(tools).length > 0) {
103
+ logger.debug("Azure Stream - First 5 Tools Detail", {
104
+ tools: Object.keys(tools)
105
+ .slice(0, 5)
106
+ .map((name) => ({
107
+ name,
108
+ description: tools[name]?.description?.substring(0, 100),
109
+ })),
110
+ });
111
+ }
75
112
  // Build message array from options
76
113
  const messages = buildMessagesArray(options);
77
114
  const stream = await streamText({
78
115
  model: this.azureProvider(this.deployment),
79
116
  messages: messages,
80
- maxTokens: options.maxTokens || 1000,
81
- temperature: options.temperature || 0.7,
117
+ ...(options.maxTokens !== null && options.maxTokens !== undefined
118
+ ? { maxTokens: options.maxTokens }
119
+ : {}),
120
+ ...(options.temperature !== null && options.temperature !== undefined
121
+ ? { temperature: options.temperature }
122
+ : {}),
123
+ tools,
124
+ toolChoice: shouldUseTools ? "auto" : "none",
125
+ maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
82
126
  });
127
+ // Transform string stream to content object stream using BaseProvider method
128
+ const transformedStream = this.createTextStream(stream);
83
129
  return {
84
- stream: (async function* () {
85
- for await (const chunk of stream.textStream) {
86
- yield { content: chunk };
87
- }
88
- })(),
130
+ stream: transformedStream,
89
131
  provider: "azure",
90
132
  model: this.deployment,
91
133
  metadata: {
@@ -5,12 +5,12 @@ import { BaseProvider } from "../core/baseProvider.js";
5
5
  import { logger } from "../utils/logger.js";
6
6
  import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
7
7
  import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } from "../types/errors.js";
8
- import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
8
+ import { DEFAULT_MAX_STEPS } from "../core/constants.js";
9
9
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
10
10
  import { buildMessagesArray } from "../utils/messageBuilder.js";
11
11
  // Create Google GenAI client
12
12
  async function createGoogleGenAIClient(apiKey) {
13
- const mod = await import("@google/genai");
13
+ const mod = await import("@google/generative-ai");
14
14
  const ctor = mod.GoogleGenAI;
15
15
  if (!ctor) {
16
16
  throw new Error("@google/genai does not export GoogleGenAI");
@@ -96,7 +96,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
96
96
  model,
97
97
  messages: messages,
98
98
  temperature: options.temperature,
99
- maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
99
+ maxTokens: options.maxTokens, // No SDK-side default — the provider's own token limit applies when unspecified
100
100
  tools,
101
101
  maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
102
102
  toolChoice: shouldUseTools ? "auto" : "none",