@juspay/neurolink 7.33.1 → 7.33.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. package/CHANGELOG.md +8 -0
  2. package/dist/constants/index.d.ts +192 -0
  3. package/dist/constants/index.js +195 -0
  4. package/dist/constants/performance.d.ts +366 -0
  5. package/dist/constants/performance.js +389 -0
  6. package/dist/constants/retry.d.ts +224 -0
  7. package/dist/constants/retry.js +266 -0
  8. package/dist/constants/timeouts.d.ts +225 -0
  9. package/dist/constants/timeouts.js +182 -0
  10. package/dist/constants/tokens.d.ts +234 -0
  11. package/dist/constants/tokens.js +314 -0
  12. package/dist/core/types.d.ts +268 -0
  13. package/dist/core/types.js +153 -0
  14. package/dist/lib/constants/index.d.ts +192 -0
  15. package/dist/lib/constants/index.js +195 -0
  16. package/dist/lib/constants/performance.d.ts +366 -0
  17. package/dist/lib/constants/performance.js +389 -0
  18. package/dist/lib/constants/retry.d.ts +224 -0
  19. package/dist/lib/constants/retry.js +266 -0
  20. package/dist/lib/constants/timeouts.d.ts +225 -0
  21. package/dist/lib/constants/timeouts.js +182 -0
  22. package/dist/lib/constants/tokens.d.ts +234 -0
  23. package/dist/lib/constants/tokens.js +314 -0
  24. package/dist/lib/core/types.d.ts +268 -0
  25. package/dist/lib/core/types.js +153 -0
  26. package/dist/lib/mcp/externalServerManager.d.ts +18 -3
  27. package/dist/lib/mcp/externalServerManager.js +125 -3
  28. package/dist/lib/models/modelRegistry.d.ts +1 -1
  29. package/dist/lib/models/modelRegistry.js +63 -37
  30. package/dist/lib/neurolink.d.ts +1 -1
  31. package/dist/lib/neurolink.js +38 -36
  32. package/dist/lib/providers/azureOpenai.d.ts +1 -1
  33. package/dist/lib/providers/azureOpenai.js +2 -1
  34. package/dist/lib/utils/providerConfig.d.ts +25 -0
  35. package/dist/lib/utils/providerConfig.js +24 -3
  36. package/dist/lib/utils/providerHealth.d.ts +1 -1
  37. package/dist/lib/utils/providerHealth.js +40 -33
  38. package/dist/lib/utils/providerSetupMessages.js +7 -6
  39. package/dist/lib/utils/providerUtils.js +16 -24
  40. package/dist/mcp/externalServerManager.d.ts +18 -3
  41. package/dist/mcp/externalServerManager.js +125 -3
  42. package/dist/models/modelRegistry.d.ts +1 -1
  43. package/dist/models/modelRegistry.js +63 -37
  44. package/dist/neurolink.d.ts +1 -1
  45. package/dist/neurolink.js +38 -36
  46. package/dist/providers/azureOpenai.d.ts +1 -1
  47. package/dist/providers/azureOpenai.js +2 -1
  48. package/dist/utils/providerConfig.d.ts +25 -0
  49. package/dist/utils/providerConfig.js +24 -3
  50. package/dist/utils/providerHealth.d.ts +1 -1
  51. package/dist/utils/providerHealth.js +40 -33
  52. package/dist/utils/providerSetupMessages.js +7 -6
  53. package/dist/utils/providerUtils.js +16 -24
  54. package/package.json +1 -1
@@ -3,7 +3,8 @@
  * Prevents 500 errors by validating provider availability and configuration
  */
  import { logger } from "./logger.js";
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, BedrockModels, } from "../core/types.js";
+ import { API_KEY_LENGTHS, PROJECT_ID_FORMAT } from "./providerConfig.js";
  import { basename } from "path";
  import { createProxyFetch } from "../proxy/proxyFetch.js";
  export class ProviderHealthChecker {
@@ -402,17 +403,19 @@ export class ProviderHealthChecker {
  static validateApiKeyFormat(providerName, apiKey) {
  switch (providerName) {
  case AIProviderName.ANTHROPIC:
- return apiKey.startsWith("sk-ant-") && apiKey.length > 20;
+ return (apiKey.startsWith("sk-ant-") &&
+ apiKey.length >= API_KEY_LENGTHS.ANTHROPIC_MIN);
  case AIProviderName.OPENAI:
- return apiKey.startsWith("sk-") && apiKey.length > 20;
+ return (apiKey.startsWith("sk-") &&
+ apiKey.length >= API_KEY_LENGTHS.OPENAI_MIN);
  case AIProviderName.GOOGLE_AI:
- return apiKey.length > 20; // Basic length check
+ return apiKey.length >= API_KEY_LENGTHS.GOOGLE_AI_EXACT; // Basic length check
  case AIProviderName.VERTEX:
  return apiKey.endsWith(".json") || apiKey.includes("type"); // JSON key format
  case AIProviderName.BEDROCK:
- return apiKey.length >= 20; // AWS access key length
+ return apiKey.length >= API_KEY_LENGTHS.AWS_ACCESS_KEY; // AWS access key length
  case AIProviderName.AZURE:
- return apiKey.length >= 32; // Azure OpenAI API key length
+ return apiKey.length >= API_KEY_LENGTHS.AZURE_MIN; // Azure OpenAI API key length
  case AIProviderName.OLLAMA:
  return true; // Ollama usually doesn't require specific format
  default:
@@ -604,14 +607,14 @@ export class ProviderHealthChecker {
  static checkBedrockModels(healthStatus) {
  const bedrockModel = process.env.BEDROCK_MODEL || process.env.BEDROCK_MODEL_ID;
  const supportedModels = [
- "anthropic.claude-3-sonnet-20240229-v1:0",
- "anthropic.claude-3-haiku-20240307-v1:0",
- "anthropic.claude-3-opus-20240229-v1:0",
+ BedrockModels.CLAUDE_3_SONNET,
+ BedrockModels.CLAUDE_3_HAIKU,
+ BedrockModels.CLAUDE_3_5_SONNET,
  "anthropic.claude-v2:1",
  "amazon.titan-text-express-v1",
  ];
  if (!bedrockModel) {
- healthStatus.recommendations.push("Set BEDROCK_MODEL or BEDROCK_MODEL_ID for faster startup (e.g., anthropic.claude-3-sonnet-20240229-v1:0)");
+ healthStatus.recommendations.push(`Set BEDROCK_MODEL or BEDROCK_MODEL_ID for faster startup (e.g., ${BedrockModels.CLAUDE_3_SONNET})`);
  }
  else if (!supportedModels.includes(bedrockModel)) {
  healthStatus.recommendations.push(`Consider using a popular Bedrock model: ${supportedModels.slice(0, 3).join(", ")}`);
@@ -658,39 +661,44 @@ export class ProviderHealthChecker {
  switch (providerName) {
  case AIProviderName.ANTHROPIC:
  return [
- "claude-3-5-sonnet-20241022",
- "claude-3-haiku-20240307",
- "claude-3-opus-20240229",
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ AnthropicModels.CLAUDE_3_HAIKU,
+ AnthropicModels.CLAUDE_3_OPUS,
  ];
  case AIProviderName.OPENAI:
- return ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"];
+ return [
+ OpenAIModels.GPT_4O,
+ OpenAIModels.GPT_4O_MINI,
+ OpenAIModels.GPT_3_5_TURBO,
+ ];
  case AIProviderName.GOOGLE_AI:
- return ["gemini-1.5-pro", "gemini-1.5-flash", "gemini-pro"];
+ return [
+ GoogleAIModels.GEMINI_1_5_PRO,
+ GoogleAIModels.GEMINI_1_5_FLASH,
+ GoogleAIModels.GEMINI_2_5_PRO,
+ ];
  case AIProviderName.VERTEX:
  return [
  // Google models (via vertex provider)
- "gemini-2.5-pro",
- "gemini-2.5-flash",
- "gemini-2.5-flash-lite",
- "gemini-2.0-flash-001",
- "gemini-1.5-pro",
- "gemini-1.5-flash",
+ GoogleAIModels.GEMINI_2_5_PRO,
+ GoogleAIModels.GEMINI_2_5_FLASH,
+ GoogleAIModels.GEMINI_2_5_FLASH_LITE,
+ GoogleAIModels.GEMINI_2_0_FLASH_001,
+ GoogleAIModels.GEMINI_1_5_PRO,
+ GoogleAIModels.GEMINI_1_5_FLASH,
  // Anthropic models (via vertexAnthropic provider)
  "claude-sonnet-4@20250514",
  "claude-opus-4@20250514",
- "claude-3-5-sonnet-20241022",
- "claude-3-5-haiku-20241022",
- "claude-3-sonnet-20240229",
- "claude-3-haiku-20240307",
- "claude-3-opus-20240229",
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ AnthropicModels.CLAUDE_3_5_HAIKU,
+ AnthropicModels.CLAUDE_3_SONNET,
+ AnthropicModels.CLAUDE_3_HAIKU,
+ AnthropicModels.CLAUDE_3_OPUS,
  ];
  case AIProviderName.BEDROCK:
- return [
- "anthropic.claude-3-sonnet-20240229-v1:0",
- "anthropic.claude-3-haiku-20240307-v1:0",
- ];
+ return [BedrockModels.CLAUDE_3_SONNET, BedrockModels.CLAUDE_3_HAIKU];
  case AIProviderName.AZURE:
- return ["gpt-4o", "gpt-4o-mini", "gpt-35-turbo"];
+ return [OpenAIModels.GPT_4O, OpenAIModels.GPT_4O_MINI, "gpt-35-turbo"];
  case AIProviderName.OLLAMA:
  return ["llama3.2:latest", "llama3.1:latest", "mistral:latest"];
  default:
@@ -951,8 +959,7 @@ export class ProviderHealthChecker {
  if (projectId) {
  result.projectId = projectId;
  // Validate project ID format
- const projectIdPattern = /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/;
- if (projectIdPattern.test(projectId)) {
+ if (PROJECT_ID_FORMAT.PATTERN.test(projectId)) {
  result.isValid = true;
  }
  else {
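
Note: the new providerConfig module referenced in these hunks (providerConfig.js, +24 lines) is added in this release, but its body is not included in the diff shown here. A minimal sketch of its likely shape, inferring the values from the inline literals removed above (the exact names and values beyond those referenced are assumptions):

```ts
// Hypothetical reconstruction of providerConfig; not the published source.
// Values are inferred from the literals this diff replaces.
export const API_KEY_LENGTHS = {
  OPENAI_MIN: 48,        // was /^sk-[A-Za-z0-9]{48,}$/ in providerUtils.js
  ANTHROPIC_MIN: 95,     // was /^sk-ant-[A-Za-z0-9-_]{95,}$/
  GOOGLE_AI_EXACT: 39,   // was /^[A-Za-z0-9_-]{39}$/
  AWS_ACCESS_KEY: 20,    // was apiKey.length >= 20
  AZURE_MIN: 32,         // was /^[a-f0-9]{32}$/
  HUGGINGFACE_EXACT: 37, // was /^hf_[A-Za-z0-9]{37}$/
  MISTRAL_EXACT: 32,     // was /^[A-Za-z0-9]{32,}$/
} as const;

export const PROJECT_ID_FORMAT = {
  // Google Cloud project ID: starts with a lowercase letter, ends with a
  // letter or digit, 6-30 chars of lowercase letters, digits, and hyphens.
  PATTERN: /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/,
} as const;
```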
@@ -2,6 +2,7 @@
  * Enhanced Provider Setup Messages
  * Provides detailed setup instructions for AI providers
  */
+ import { OpenAIModels, GoogleAIModels, AnthropicModels, APIVersions, } from "../core/types.js";
  /**
  * Generate enhanced error message with setup instructions
  */
@@ -12,7 +13,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  envVars: [
  'OPENAI_API_KEY="sk-proj-your-openai-api-key"',
  "# Optional:",
- 'OPENAI_MODEL="gpt-4o"',
+ `OPENAI_MODEL="${OpenAIModels.GPT_4O}"`,
  'OPENAI_BASE_URL="https://api.openai.com"',
  ],
  },
@@ -21,7 +22,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  envVars: [
  'ANTHROPIC_API_KEY="sk-ant-api03-your-anthropic-key"',
  "# Optional:",
- 'ANTHROPIC_MODEL="claude-3-5-sonnet-20241022"',
+ `ANTHROPIC_MODEL="${AnthropicModels.CLAUDE_3_5_SONNET}"`,
  ],
  },
  "google-ai": {
@@ -29,7 +30,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  envVars: [
  'GOOGLE_AI_API_KEY="AIza-your-google-ai-api-key"',
  "# Optional:",
- 'GOOGLE_AI_MODEL="gemini-2.5-pro"',
+ `GOOGLE_AI_MODEL="${GoogleAIModels.GEMINI_2_5_PRO}"`,
  ],
  },
  vertex: {
@@ -39,7 +40,7 @@ export function getProviderSetupMessage(provider, missingVars) {
  'GOOGLE_VERTEX_PROJECT="your-gcp-project-id"',
  'GOOGLE_VERTEX_LOCATION="us-central1"',
  "# Optional:",
- 'VERTEX_MODEL="gemini-2.5-pro"',
+ `VERTEX_MODEL="${GoogleAIModels.GEMINI_2_5_PRO}"`,
  ],
  },
  bedrock: {
@@ -61,8 +62,8 @@ export function getProviderSetupMessage(provider, missingVars) {
  'AZURE_OPENAI_ENDPOINT="https://your-resource.openai.azure.com/"',
  'AZURE_OPENAI_DEPLOYMENT_ID="your-deployment-name"',
  "# Optional:",
- 'AZURE_MODEL="gpt-4o"',
- 'AZURE_API_VERSION="2024-02-15-preview"',
+ `AZURE_MODEL="${OpenAIModels.GPT_4O}"`,
+ `AZURE_API_VERSION="${APIVersions.AZURE_STABLE}"`,
  ],
  },
  huggingface: {
@@ -5,6 +5,7 @@
  import { AIProviderFactory } from "../core/factory.js";
  import { logger } from "./logger.js";
  import { ProviderHealthChecker } from "./providerHealth.js";
+ import { API_KEY_FORMATS, API_KEY_LENGTHS, PROJECT_ID_FORMAT, } from "./providerConfig.js";
  /**
  * Get the best available provider based on real-time availability checks
  * Enhanced version consolidated from providerUtils-fixed.ts
@@ -123,15 +124,6 @@ async function isProviderAvailable(providerName) {
  return false;
  }
  }
- /**
- * Google Cloud Project ID validation regex
- * Format requirements:
- * - Must start with a lowercase letter
- * - Can contain lowercase letters, numbers, and hyphens
- * - Must end with a lowercase letter or number
- * - Total length must be 6-30 characters
- */
- const GOOGLE_CLOUD_PROJECT_ID_REGEX = /^[a-z][a-z0-9-]{4,28}[a-z0-9]$/;
  /**
  * Validate environment variable values for a provider
  * Addresses GitHub Copilot comment about adding environment variable validation
@@ -166,7 +158,7 @@ export function validateProviderEnvVars(provider) {
  validateAnthropicCredentials(result);
  break;
  case "azure":
- case "azureOpenai":
+ case "azureopenai":
  validateAzureCredentials(result);
  break;
  case "google-ai":
@@ -240,7 +232,7 @@ function validateVertexCredentials(result) {
  if (!projectId) {
  result.missingVars.push("GOOGLE_CLOUD_PROJECT_ID (or variant)");
  }
- else if (!GOOGLE_CLOUD_PROJECT_ID_REGEX.test(projectId)) {
+ else if (!PROJECT_ID_FORMAT.PATTERN.test(projectId)) {
  result.invalidVars.push("Project ID format invalid (must be 6-30 lowercase letters, digits, hyphens)");
  }
  if (!hasCredentials) {
@@ -259,8 +251,8 @@ function validateOpenAICredentials(result) {
  if (!apiKey) {
  result.missingVars.push("OPENAI_API_KEY");
  }
- else if (!/^sk-[A-Za-z0-9]{48,}$/.test(apiKey)) {
- result.invalidVars.push("OPENAI_API_KEY (should start with 'sk-' followed by 48+ characters)");
+ else if (!API_KEY_FORMATS.openai.test(apiKey)) {
+ result.invalidVars.push(`OPENAI_API_KEY (should start with 'sk-' followed by ${API_KEY_LENGTHS.OPENAI_MIN}+ characters)`);
  }
  }
  /**
@@ -271,8 +263,8 @@ function validateAnthropicCredentials(result) {
  if (!apiKey) {
  result.missingVars.push("ANTHROPIC_API_KEY");
  }
- else if (!/^sk-ant-[A-Za-z0-9-_]{95,}$/.test(apiKey)) {
- result.invalidVars.push("ANTHROPIC_API_KEY (should start with 'sk-ant-' followed by 95+ characters)");
+ else if (!API_KEY_FORMATS.anthropic.test(apiKey)) {
+ result.invalidVars.push(`ANTHROPIC_API_KEY (should start with 'sk-ant-' followed by ${API_KEY_LENGTHS.ANTHROPIC_MIN}+ characters)`);
  }
  }
  /**
@@ -284,8 +276,8 @@ function validateAzureCredentials(result) {
  if (!apiKey) {
  result.missingVars.push("AZURE_OPENAI_API_KEY");
  }
- else if (!/^[a-f0-9]{32}$/.test(apiKey)) {
- result.invalidVars.push("AZURE_OPENAI_API_KEY (should be 32 hexadecimal characters)");
+ else if (!API_KEY_FORMATS.azure.test(apiKey)) {
+ result.invalidVars.push(`AZURE_OPENAI_API_KEY (should be at least ${API_KEY_LENGTHS.AZURE_MIN} alphanumeric characters)`);
  }
  if (!endpoint) {
  result.missingVars.push("AZURE_OPENAI_ENDPOINT");
@@ -302,8 +294,8 @@ function validateGoogleAICredentials(result) {
  if (!apiKey) {
  result.missingVars.push("GOOGLE_AI_API_KEY (or GOOGLE_GENERATIVE_AI_API_KEY)");
  }
- else if (!/^[A-Za-z0-9_-]{39}$/.test(apiKey)) {
- result.invalidVars.push("GOOGLE_AI_API_KEY (should be 39 alphanumeric characters with dashes/underscores)");
+ else if (!API_KEY_FORMATS["google-ai"].test(apiKey)) {
+ result.invalidVars.push(`GOOGLE_AI_API_KEY (should be ${API_KEY_LENGTHS.GOOGLE_AI_EXACT} alphanumeric characters with dashes/underscores)`);
  }
  }
  /**
@@ -314,8 +306,8 @@ function validateHuggingFaceCredentials(result) {
  if (!apiKey) {
  result.missingVars.push("HUGGINGFACE_API_KEY (or HF_TOKEN)");
  }
- else if (!/^hf_[A-Za-z0-9]{37}$/.test(apiKey)) {
- result.invalidVars.push("HUGGINGFACE_API_KEY (should start with 'hf_' followed by 37 characters)");
+ else if (!API_KEY_FORMATS.huggingface.test(apiKey)) {
+ result.invalidVars.push(`HUGGINGFACE_API_KEY (should start with 'hf_' followed by ${API_KEY_LENGTHS.HUGGINGFACE_EXACT} characters)`);
  }
  }
  /**
@@ -326,8 +318,8 @@ function validateMistralCredentials(result) {
  if (!apiKey) {
  result.missingVars.push("MISTRAL_API_KEY");
  }
- else if (!/^[A-Za-z0-9]{32,}$/.test(apiKey)) {
- result.invalidVars.push("MISTRAL_API_KEY (should be 32+ alphanumeric characters)");
+ else if (!API_KEY_FORMATS.mistral.test(apiKey)) {
+ result.invalidVars.push(`MISTRAL_API_KEY (should be ${API_KEY_LENGTHS.MISTRAL_EXACT} alphanumeric characters)`);
  }
  }
  /**
@@ -381,7 +373,7 @@ export function hasProviderEnvVars(provider) {
  case "claude":
  return !!process.env.ANTHROPIC_API_KEY;
  case "azure":
- case "azureOpenai":
+ case "azureopenai":
  return !!process.env.AZURE_OPENAI_API_KEY;
  case "google-ai":
  case "google-studio":
@@ -22,12 +22,27 @@ export declare class ExternalServerManager extends EventEmitter {
  enableMainRegistryIntegration?: boolean;
  });
  /**
- * Load MCP server configurations from .mcp-config.json file
+ * Load MCP server configurations from .mcp-config.json file with parallel loading support
  * Automatically registers servers found in the configuration
  * @param configPath Optional path to config file (defaults to .mcp-config.json in cwd)
- * @returns Promise resolving to number of servers loaded
+ * @param options Loading options including parallel support
+ * @returns Promise resolving to { serversLoaded, errors }
  */
- loadMCPConfiguration(configPath?: string): Promise<ServerLoadResult>;
+ loadMCPConfiguration(configPath?: string, options?: {
+ parallel?: boolean;
+ }): Promise<ServerLoadResult>;
+ /**
+ * Load MCP servers in parallel for improved performance
+ * @param configPath Optional path to config file (defaults to .mcp-config.json in cwd)
+ * @returns Promise resolving to batch operation result
+ */
+ loadMCPConfigurationParallel(configPath?: string | null): Promise<ServerLoadResult>;
+ /**
+ * Load MCP servers sequentially (original implementation for backward compatibility)
+ * @param configPath Optional path to config file (defaults to .mcp-config.json in cwd)
+ * @returns Promise resolving to batch operation result
+ */
+ loadMCPConfigurationSequential(configPath?: string): Promise<ServerLoadResult>;
  /**
  * Validate external MCP server configuration
  */
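
Based on the updated declaration, callers opt in to the new behaviour through the second parameter. A minimal usage sketch; the manager construction and import details are not part of this diff, so the instance is typed structurally against the declared signature:

```ts
// Sketch only: `manager` is assumed to be an already-constructed
// ExternalServerManager instance; construction and import details are
// outside this diff. The shape below mirrors the new .d.ts declaration.
async function loadServers(manager: {
  loadMCPConfiguration(
    configPath?: string,
    options?: { parallel?: boolean },
  ): Promise<{ serversLoaded: number; errors: string[] }>;
}) {
  // Omitting configPath falls back to .mcp-config.json in the current working directory.
  const { serversLoaded, errors } = await manager.loadMCPConfiguration(undefined, {
    parallel: true,
  });
  if (errors.length > 0) {
    console.warn(`Loaded ${serversLoaded} MCP servers with ${errors.length} errors`, errors);
  } else {
    console.log(`Loaded ${serversLoaded} MCP servers`);
  }
}
```

Omitting the options argument (or passing `{ parallel: false }`) keeps the original sequential behaviour, so existing callers are unaffected.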
@@ -107,12 +107,134 @@ export class ExternalServerManager extends EventEmitter {
  process.on("beforeExit", () => this.shutdown());
  }
  /**
- * Load MCP server configurations from .mcp-config.json file
+ * Load MCP server configurations from .mcp-config.json file with parallel loading support
  * Automatically registers servers found in the configuration
  * @param configPath Optional path to config file (defaults to .mcp-config.json in cwd)
- * @returns Promise resolving to number of servers loaded
+ * @param options Loading options including parallel support
+ * @returns Promise resolving to { serversLoaded, errors }
  */
- async loadMCPConfiguration(configPath) {
+ async loadMCPConfiguration(configPath, options = {}) {
+ if (options.parallel) {
+ return this.loadMCPConfigurationParallel(configPath);
+ }
+ return this.loadMCPConfigurationSequential(configPath);
+ }
+ /**
+ * Load MCP servers in parallel for improved performance
+ * @param configPath Optional path to config file (defaults to .mcp-config.json in cwd)
+ * @returns Promise resolving to batch operation result
+ */
+ async loadMCPConfigurationParallel(configPath) {
+ const fs = await import("fs");
+ const path = await import("path");
+ const finalConfigPath = configPath || path.join(process.cwd(), ".mcp-config.json");
+ if (!fs.existsSync(finalConfigPath)) {
+ mcpLogger.debug(`[ExternalServerManager] No MCP config found at ${finalConfigPath}`);
+ return { serversLoaded: 0, errors: [] };
+ }
+ mcpLogger.debug(`[ExternalServerManager] Loading MCP configuration in PARALLEL mode from ${finalConfigPath}`);
+ try {
+ const configContent = fs.readFileSync(finalConfigPath, "utf8");
+ const config = JSON.parse(configContent);
+ if (!config.mcpServers || typeof config.mcpServers !== "object") {
+ mcpLogger.debug("[ExternalServerManager] No mcpServers found in configuration");
+ return { serversLoaded: 0, errors: [] };
+ }
+ // Create promises for all servers to start them concurrently
+ const serverPromises = Object.entries(config.mcpServers).map(async ([serverId, serverConfig]) => {
+ try {
+ // Validate and convert config format to MCPServerInfo
+ if (!isValidExternalMCPServerConfig(serverConfig)) {
+ throw new Error(`Invalid server config for ${serverId}: missing required properties or wrong types`);
+ }
+ const externalConfig = {
+ id: serverId,
+ name: serverId,
+ description: `External MCP server: ${serverId}`,
+ transport: typeof serverConfig.transport === "string"
+ ? serverConfig.transport
+ : "stdio",
+ status: "initializing",
+ tools: [],
+ command: serverConfig.command,
+ args: Array.isArray(serverConfig.args)
+ ? serverConfig.args
+ : [],
+ env: isNonNullObject(serverConfig.env)
+ ? serverConfig.env
+ : {},
+ timeout: typeof serverConfig.timeout === "number"
+ ? serverConfig.timeout
+ : undefined,
+ retries: typeof serverConfig.retries === "number"
+ ? serverConfig.retries
+ : undefined,
+ healthCheckInterval: typeof serverConfig.healthCheckInterval === "number"
+ ? serverConfig.healthCheckInterval
+ : undefined,
+ autoRestart: typeof serverConfig.autoRestart === "boolean"
+ ? serverConfig.autoRestart
+ : undefined,
+ cwd: typeof serverConfig.cwd === "string"
+ ? serverConfig.cwd
+ : undefined,
+ url: typeof serverConfig.url === "string"
+ ? serverConfig.url
+ : undefined,
+ metadata: safeMetadataConversion(serverConfig.metadata),
+ };
+ const result = await this.addServer(serverId, externalConfig);
+ return { serverId, result };
+ }
+ catch (error) {
+ const errorMsg = `Failed to load MCP server ${serverId}: ${error instanceof Error ? error.message : String(error)}`;
+ mcpLogger.warn(`[ExternalServerManager] ${errorMsg}`);
+ return { serverId, error: errorMsg };
+ }
+ });
+ // Start all servers concurrently and wait for completion
+ const results = await Promise.allSettled(serverPromises);
+ // Process results to count successes and collect errors
+ let serversLoaded = 0;
+ const errors = [];
+ for (const result of results) {
+ if (result.status === "fulfilled") {
+ const { serverId, result: serverResult, error } = result.value;
+ if (serverResult && serverResult.success) {
+ serversLoaded++;
+ mcpLogger.debug(`[ExternalServerManager] Successfully loaded MCP server in parallel: ${serverId}`);
+ }
+ else if (error) {
+ errors.push(error);
+ }
+ else if (serverResult && !serverResult.success) {
+ const errorMsg = `Failed to load server ${serverId}: ${serverResult.error}`;
+ errors.push(errorMsg);
+ mcpLogger.warn(`[ExternalServerManager] ${errorMsg}`);
+ }
+ }
+ else {
+ // Promise.allSettled rejected - this shouldn't happen with our error handling
+ const errorMsg = `Unexpected error during parallel loading: ${result.reason}`;
+ errors.push(errorMsg);
+ mcpLogger.error(`[ExternalServerManager] ${errorMsg}`);
+ }
+ }
+ mcpLogger.info(`[ExternalServerManager] PARALLEL MCP configuration loading complete: ${serversLoaded} servers loaded, ${errors.length} errors`);
+ return { serversLoaded, errors };
+ }
+ catch (error) {
+ const errorMsg = `Failed to load MCP configuration in parallel mode: ${error instanceof Error ? error.message : String(error)}`;
+ mcpLogger.error(`[ExternalServerManager] ${errorMsg}`);
+ return { serversLoaded: 0, errors: [errorMsg] };
+ }
+ }
+ /**
+ * Load MCP servers sequentially (original implementation for backward compatibility)
+ * @param configPath Optional path to config file (defaults to .mcp-config.json in cwd)
+ * @returns Promise resolving to batch operation result
+ */
+ async loadMCPConfigurationSequential(configPath) {
  const fs = await import("fs");
  const path = await import("path");
  const finalConfigPath = configPath || path.join(process.cwd(), ".mcp-config.json");
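
The parallel path added above follows a common pattern: map every config entry to a promise that never rejects (each one catches its own error and returns a tagged result), then gather the batch with Promise.allSettled and tally successes and failures. A distilled, standalone sketch of that pattern, with illustrative names that are not the library's API:

```ts
// Illustrative pattern only; not the package's exported API.
type LoadOutcome =
  | { serverId: string; ok: true }
  | { serverId: string; ok: false; error: string };

async function loadAll(
  servers: Record<string, unknown>,
  start: (id: string, cfg: unknown) => Promise<void>,
): Promise<{ loaded: number; errors: string[] }> {
  // Each promise resolves even on failure, so one bad server cannot
  // abort or reject the whole batch.
  const outcomes = await Promise.allSettled(
    Object.entries(servers).map(async ([serverId, cfg]): Promise<LoadOutcome> => {
      try {
        await start(serverId, cfg);
        return { serverId, ok: true };
      } catch (err) {
        return { serverId, ok: false, error: err instanceof Error ? err.message : String(err) };
      }
    }),
  );

  let loaded = 0;
  const errors: string[] = [];
  for (const o of outcomes) {
    if (o.status !== "fulfilled") {
      errors.push(String(o.reason)); // defensive: these promises never reject
    } else if (o.value.ok) {
      loaded++;
    } else {
      errors.push(o.value.error);
    }
  }
  return { loaded, errors };
}
```

Promise.allSettled is used even though the per-entry catch already prevents rejections; it keeps the aggregation step defensive without changing the happy path.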
@@ -3,7 +3,7 @@
  * Provides centralized model data for models command system
  * Part of Phase 4.1 - Models Command System
  */
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName } from "../core/types.js";
  import type { JsonValue } from "../types/common.js";
  /**
  * Model capabilities interface
@@ -3,14 +3,14 @@
  * Provides centralized model data for models command system
  * Part of Phase 4.1 - Models Command System
  */
- import { AIProviderName } from "../types/index.js";
+ import { AIProviderName, OpenAIModels, GoogleAIModels, AnthropicModels, DEFAULT_MODEL_ALIASES, } from "../core/types.js";
  /**
  * Comprehensive model registry
  */
  export const MODEL_REGISTRY = {
  // OpenAI Models
- "gpt-4o": {
- id: "gpt-4o",
+ [OpenAIModels.GPT_4O]: {
+ id: OpenAIModels.GPT_4O,
  name: "GPT-4 Omni",
  provider: AIProviderName.OPENAI,
  description: "Most capable OpenAI model with vision and advanced reasoning",
@@ -53,8 +53,8 @@ export const MODEL_REGISTRY = {
  releaseDate: "2024-05-13",
  category: "general",
  },
- "gpt-4o-mini": {
- id: "gpt-4o-mini",
+ [OpenAIModels.GPT_4O_MINI]: {
+ id: OpenAIModels.GPT_4O_MINI,
  name: "GPT-4 Omni Mini",
  provider: AIProviderName.OPENAI,
  description: "Fast and cost-effective model with strong performance",
@@ -98,8 +98,8 @@ export const MODEL_REGISTRY = {
  category: "general",
  },
  // Google AI Studio Models
- "gemini-2.5-pro": {
- id: "gemini-2.5-pro",
+ [GoogleAIModels.GEMINI_2_5_PRO]: {
+ id: GoogleAIModels.GEMINI_2_5_PRO,
  name: "Gemini 2.5 Pro",
  provider: AIProviderName.GOOGLE_AI,
  description: "Google's most capable multimodal model with large context window",
@@ -142,8 +142,8 @@ export const MODEL_REGISTRY = {
  releaseDate: "2024-12-11",
  category: "reasoning",
  },
- "gemini-2.5-flash": {
- id: "gemini-2.5-flash",
+ [GoogleAIModels.GEMINI_2_5_FLASH]: {
+ id: GoogleAIModels.GEMINI_2_5_FLASH,
  name: "Gemini 2.5 Flash",
  provider: AIProviderName.GOOGLE_AI,
  description: "Fast and efficient multimodal model with large context",
@@ -187,8 +187,8 @@ export const MODEL_REGISTRY = {
  category: "general",
  },
  // Anthropic Models
- "claude-3-5-sonnet-20241022": {
- id: "claude-3-5-sonnet-20241022",
+ [AnthropicModels.CLAUDE_3_5_SONNET]: {
+ id: AnthropicModels.CLAUDE_3_5_SONNET,
  name: "Claude 3.5 Sonnet",
  provider: AIProviderName.ANTHROPIC,
  description: "Anthropic's most capable model with excellent reasoning and coding",
@@ -236,8 +236,8 @@ export const MODEL_REGISTRY = {
  releaseDate: "2024-10-22",
  category: "coding",
  },
- "claude-3-5-haiku-20241022": {
- id: "claude-3-5-haiku-20241022",
+ [AnthropicModels.CLAUDE_3_5_HAIKU]: {
+ id: AnthropicModels.CLAUDE_3_5_HAIKU,
  name: "Claude 3.5 Haiku",
  provider: AIProviderName.ANTHROPIC,
  description: "Fast and efficient Claude model for quick tasks",
@@ -380,39 +380,65 @@ Object.values(MODEL_REGISTRY).forEach((model) => {
  MODEL_ALIASES[alias.toLowerCase()] = model.id;
  });
  });
- // Add common aliases
- Object.assign(MODEL_ALIASES, {
- latest: "gpt-4o", // Default latest model
- fastest: "gpt-4o-mini",
- cheapest: "gemini-2.5-flash",
- "best-coding": "claude-3-5-sonnet-20241022",
- "best-analysis": "gemini-2.5-pro",
- "best-creative": "claude-3-5-sonnet-20241022",
- "best-value": "gemini-2.5-flash",
- local: "llama3.2:latest",
+ // Pull canonical alias recommendations from core/types
+ Object.entries(DEFAULT_MODEL_ALIASES).forEach(([k, v]) => {
+ MODEL_ALIASES[k.toLowerCase().replace(/_/g, "-")] = v;
  });
+ MODEL_ALIASES.local = "llama3.2:latest";
  /**
  * Use case to model mappings
  */
  export const USE_CASE_RECOMMENDATIONS = {
- coding: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
- creative: ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
- analysis: ["gemini-2.5-pro", "claude-3-5-sonnet-20241022", "gpt-4o"],
+ coding: [
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ OpenAIModels.GPT_4O,
+ GoogleAIModels.GEMINI_2_5_PRO,
+ ],
+ creative: [
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ OpenAIModels.GPT_4O,
+ GoogleAIModels.GEMINI_2_5_PRO,
+ ],
+ analysis: [
+ GoogleAIModels.GEMINI_2_5_PRO,
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ OpenAIModels.GPT_4O,
+ ],
  conversation: [
- "gpt-4o",
- "claude-3-5-sonnet-20241022",
- "claude-3-5-haiku-20241022",
+ OpenAIModels.GPT_4O,
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ AnthropicModels.CLAUDE_3_5_HAIKU,
+ ],
+ reasoning: [
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ GoogleAIModels.GEMINI_2_5_PRO,
+ OpenAIModels.GPT_4O,
+ ],
+ translation: [
+ GoogleAIModels.GEMINI_2_5_PRO,
+ OpenAIModels.GPT_4O,
+ AnthropicModels.CLAUDE_3_5_HAIKU,
  ],
- reasoning: ["claude-3-5-sonnet-20241022", "gemini-2.5-pro", "gpt-4o"],
- translation: ["gemini-2.5-pro", "gpt-4o", "claude-3-5-haiku-20241022"],
  summarization: [
- "gemini-2.5-flash",
- "gpt-4o-mini",
- "claude-3-5-haiku-20241022",
+ GoogleAIModels.GEMINI_2_5_FLASH,
+ OpenAIModels.GPT_4O_MINI,
+ AnthropicModels.CLAUDE_3_5_HAIKU,
+ ],
+ "cost-effective": [
+ GoogleAIModels.GEMINI_2_5_FLASH,
+ OpenAIModels.GPT_4O_MINI,
+ "mistral-small-latest",
+ ],
+ "high-quality": [
+ AnthropicModels.CLAUDE_3_5_SONNET,
+ OpenAIModels.GPT_4O,
+ GoogleAIModels.GEMINI_2_5_PRO,
+ ],
+ fast: [
+ OpenAIModels.GPT_4O_MINI,
+ GoogleAIModels.GEMINI_2_5_FLASH,
+ AnthropicModels.CLAUDE_3_5_HAIKU,
  ],
- "cost-effective": ["gemini-2.5-flash", "gpt-4o-mini", "mistral-small-latest"],
- "high-quality": ["claude-3-5-sonnet-20241022", "gpt-4o", "gemini-2.5-pro"],
- fast: ["gpt-4o-mini", "gemini-2.5-flash", "claude-3-5-haiku-20241022"],
  };
  /**
  * Get all models
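
The alias wiring above now derives kebab-case alias names from DEFAULT_MODEL_ALIASES keys instead of hard-coding them. Assuming those keys are upper-snake-case constants (the actual definition lives in core/types.js and is not shown in this diff), the transformation works like this; the sample keys and values below are illustrative, taken from the aliases that were previously hard-coded:

```ts
// Illustrative only: DEFAULT_MODEL_ALIASES' real keys and values are defined
// in core/types.js, which is not part of the hunks shown here.
const DEFAULT_MODEL_ALIASES = {
  LATEST: "gpt-4o",
  BEST_CODING: "claude-3-5-sonnet-20241022",
  BEST_VALUE: "gemini-2.5-flash",
} as const;

const MODEL_ALIASES: Record<string, string> = {};
Object.entries(DEFAULT_MODEL_ALIASES).forEach(([k, v]) => {
  // "BEST_CODING" -> "best-coding", matching the alias names used before this change.
  MODEL_ALIASES[k.toLowerCase().replace(/_/g, "-")] = v;
});
MODEL_ALIASES.local = "llama3.2:latest"; // still set explicitly, as in the diff

console.log(MODEL_ALIASES);
// { latest: "gpt-4o", "best-coding": "claude-3-5-sonnet-20241022",
//   "best-value": "gemini-2.5-flash", local: "llama3.2:latest" }
```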
@@ -146,7 +146,7 @@ export declare class NeuroLink {
  */
  private registerDirectToolsServerInternal;
  /**
- * Load MCP configuration from .mcp-config.json
+ * Load MCP configuration from .mcp-config.json with parallel loading for improved performance
  */
  private loadMCPConfigurationInternal;
  /**