@juspay/neurolink 7.0.0 → 7.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/CHANGELOG.md +15 -4
  2. package/README.md +16 -11
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/cli/commands/config.js +22 -21
  5. package/dist/cli/commands/mcp.d.ts +79 -0
  6. package/dist/cli/commands/mcp.js +916 -0
  7. package/dist/cli/commands/models.d.ts +63 -0
  8. package/dist/cli/commands/models.js +653 -0
  9. package/dist/cli/commands/ollama.js +56 -55
  10. package/dist/cli/factories/commandFactory.d.ts +67 -2
  11. package/dist/cli/factories/commandFactory.js +840 -92
  12. package/dist/cli/index.d.ts +6 -0
  13. package/dist/cli/index.js +42 -999
  14. package/dist/cli/utils/completeSetup.js +9 -8
  15. package/dist/cli/utils/envManager.js +7 -6
  16. package/dist/cli/utils/interactiveSetup.js +20 -19
  17. package/dist/core/analytics.js +25 -38
  18. package/dist/core/baseProvider.d.ts +8 -0
  19. package/dist/core/baseProvider.js +177 -68
  20. package/dist/core/constants.d.ts +11 -0
  21. package/dist/core/constants.js +17 -0
  22. package/dist/core/evaluation.js +25 -14
  23. package/dist/core/factory.js +21 -18
  24. package/dist/core/streamAnalytics.d.ts +65 -0
  25. package/dist/core/streamAnalytics.js +125 -0
  26. package/dist/factories/providerRegistry.js +3 -1
  27. package/dist/lib/core/analytics.js +25 -38
  28. package/dist/lib/core/baseProvider.d.ts +8 -0
  29. package/dist/lib/core/baseProvider.js +177 -68
  30. package/dist/lib/core/constants.d.ts +11 -0
  31. package/dist/lib/core/constants.js +17 -0
  32. package/dist/lib/core/evaluation.js +25 -14
  33. package/dist/lib/core/factory.js +22 -18
  34. package/dist/lib/core/streamAnalytics.d.ts +65 -0
  35. package/dist/lib/core/streamAnalytics.js +125 -0
  36. package/dist/lib/factories/providerRegistry.js +3 -1
  37. package/dist/lib/mcp/toolRegistry.d.ts +5 -0
  38. package/dist/lib/mcp/toolRegistry.js +60 -0
  39. package/dist/lib/models/modelRegistry.d.ts +132 -0
  40. package/dist/lib/models/modelRegistry.js +483 -0
  41. package/dist/lib/models/modelResolver.d.ts +115 -0
  42. package/dist/lib/models/modelResolver.js +467 -0
  43. package/dist/lib/neurolink.d.ts +4 -1
  44. package/dist/lib/neurolink.js +108 -69
  45. package/dist/lib/providers/anthropic.js +3 -0
  46. package/dist/lib/providers/googleAiStudio.js +13 -0
  47. package/dist/lib/providers/huggingFace.js +15 -3
  48. package/dist/lib/providers/mistral.js +19 -7
  49. package/dist/lib/providers/ollama.js +31 -7
  50. package/dist/lib/providers/openAI.js +12 -0
  51. package/dist/lib/sdk/toolRegistration.js +17 -0
  52. package/dist/lib/types/cli.d.ts +56 -1
  53. package/dist/lib/types/contextTypes.d.ts +110 -0
  54. package/dist/lib/types/contextTypes.js +176 -0
  55. package/dist/lib/types/index.d.ts +4 -1
  56. package/dist/lib/types/mcpTypes.d.ts +118 -7
  57. package/dist/lib/types/providers.d.ts +81 -0
  58. package/dist/lib/types/streamTypes.d.ts +44 -7
  59. package/dist/lib/types/tools.d.ts +9 -0
  60. package/dist/lib/types/universalProviderOptions.d.ts +3 -1
  61. package/dist/lib/types/universalProviderOptions.js +2 -1
  62. package/dist/lib/utils/logger.d.ts +7 -0
  63. package/dist/lib/utils/logger.js +16 -6
  64. package/dist/lib/utils/performance.d.ts +105 -0
  65. package/dist/lib/utils/performance.js +210 -0
  66. package/dist/lib/utils/providerUtils.js +9 -2
  67. package/dist/lib/utils/retryHandler.d.ts +89 -0
  68. package/dist/lib/utils/retryHandler.js +269 -0
  69. package/dist/mcp/toolRegistry.d.ts +5 -0
  70. package/dist/mcp/toolRegistry.js +60 -0
  71. package/dist/models/modelRegistry.d.ts +132 -0
  72. package/dist/models/modelRegistry.js +483 -0
  73. package/dist/models/modelResolver.d.ts +115 -0
  74. package/dist/models/modelResolver.js +468 -0
  75. package/dist/neurolink.d.ts +4 -1
  76. package/dist/neurolink.js +108 -69
  77. package/dist/providers/anthropic.js +3 -0
  78. package/dist/providers/googleAiStudio.js +13 -0
  79. package/dist/providers/huggingFace.js +15 -3
  80. package/dist/providers/mistral.js +19 -7
  81. package/dist/providers/ollama.js +31 -7
  82. package/dist/providers/openAI.js +12 -0
  83. package/dist/sdk/toolRegistration.js +17 -0
  84. package/dist/types/cli.d.ts +56 -1
  85. package/dist/types/contextTypes.d.ts +110 -0
  86. package/dist/types/contextTypes.js +177 -0
  87. package/dist/types/index.d.ts +4 -1
  88. package/dist/types/mcpTypes.d.ts +118 -7
  89. package/dist/types/providers.d.ts +81 -0
  90. package/dist/types/streamTypes.d.ts +44 -7
  91. package/dist/types/tools.d.ts +9 -0
  92. package/dist/types/universalProviderOptions.d.ts +3 -1
  93. package/dist/types/universalProviderOptions.js +3 -1
  94. package/dist/utils/logger.d.ts +7 -0
  95. package/dist/utils/logger.js +16 -6
  96. package/dist/utils/performance.d.ts +105 -0
  97. package/dist/utils/performance.js +210 -0
  98. package/dist/utils/providerUtils.js +9 -2
  99. package/dist/utils/retryHandler.d.ts +89 -0
  100. package/dist/utils/retryHandler.js +269 -0
  101. package/package.json +2 -1
@@ -1,4 +1,5 @@
  import { logger } from "../utils/logger.js";
+ import { SYSTEM_LIMITS } from "../core/constants.js";
  import { directAgentTools } from "../agent/directTools.js";
  /**
  * Validates if a result contains a valid toolsObject structure
@@ -49,80 +50,96 @@ export class BaseProvider {
  */
  async stream(optionsOrPrompt, analysisSchema) {
  const options = this.normalizeStreamOptions(optionsOrPrompt);
- // If tools are not disabled AND provider supports tools, use generate() and create synthetic stream
- if (!options.disableTools && this.supportsTools()) {
- try {
- // Convert stream options to text generation options
- const textOptions = {
- prompt: options.input?.text || "",
- systemPrompt: options.systemPrompt,
- temperature: options.temperature,
- maxTokens: options.maxTokens,
- disableTools: false,
- maxSteps: options.maxSteps || 5,
- provider: options.provider,
- model: options.model,
- };
- const result = await this.generate(textOptions, analysisSchema);
- // Create a synthetic stream from the generate result that simulates progressive delivery
- return {
- stream: (async function* () {
- if (result?.content) {
- // Split content into words for more natural streaming
- const words = result.content.split(/(\s+)/); // Keep whitespace
- let buffer = "";
- for (let i = 0; i < words.length; i++) {
- buffer += words[i];
- // Yield chunks of roughly 5-10 words or at punctuation
- const shouldYield = i === words.length - 1 || // Last word
- buffer.length > 50 || // Buffer getting long
- /[.!?;,]\s*$/.test(buffer); // End of sentence/clause
- if (shouldYield && buffer.trim()) {
+ // CRITICAL FIX: Always prefer real streaming over fake streaming
+ // Try real streaming first, use fake streaming only as fallback
+ try {
+ const realStreamResult = await this.executeStream(options, analysisSchema);
+ // If real streaming succeeds, return it (with tools support via Vercel AI SDK)
+ return realStreamResult;
+ }
+ catch (realStreamError) {
+ logger.warn(`Real streaming failed for ${this.providerName}, falling back to fake streaming:`, realStreamError);
+ // Fallback to fake streaming only if real streaming fails AND tools are enabled
+ if (!options.disableTools && this.supportsTools()) {
+ try {
+ // Convert stream options to text generation options
+ const textOptions = {
+ prompt: options.input?.text || "",
+ systemPrompt: options.systemPrompt,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens,
+ disableTools: false,
+ maxSteps: options.maxSteps || 5,
+ provider: options.provider,
+ model: options.model,
+ // 🔧 FIX: Include analytics and evaluation options from stream options
+ enableAnalytics: options.enableAnalytics,
+ enableEvaluation: options.enableEvaluation,
+ evaluationDomain: options.evaluationDomain,
+ toolUsageContext: options.toolUsageContext,
+ context: options.context,
+ };
+ const result = await this.generate(textOptions, analysisSchema);
+ // Create a synthetic stream from the generate result that simulates progressive delivery
+ return {
+ stream: (async function* () {
+ if (result?.content) {
+ // Split content into words for more natural streaming
+ const words = result.content.split(/(\s+)/); // Keep whitespace
+ let buffer = "";
+ for (let i = 0; i < words.length; i++) {
+ buffer += words[i];
+ // Yield chunks of roughly 5-10 words or at punctuation
+ const shouldYield = i === words.length - 1 || // Last word
+ buffer.length > 50 || // Buffer getting long
+ /[.!?;,]\s*$/.test(buffer); // End of sentence/clause
+ if (shouldYield && buffer.trim()) {
+ yield { content: buffer };
+ buffer = "";
+ // Small delay to simulate streaming (1-10ms)
+ await new Promise((resolve) => setTimeout(resolve, Math.random() * 9 + 1));
+ }
+ }
+ // Yield any remaining content
+ if (buffer.trim()) {
  yield { content: buffer };
- buffer = "";
- // Small delay to simulate streaming (1-10ms)
- await new Promise((resolve) => setTimeout(resolve, Math.random() * 9 + 1));
  }
  }
- // Yield any remaining content
- if (buffer.trim()) {
- yield { content: buffer };
- }
- }
- })(),
- usage: result?.usage,
- provider: result?.provider,
- model: result?.model,
- toolCalls: result?.toolCalls?.map((call) => ({
- toolName: call.toolName,
- parameters: call.args,
- id: call.toolCallId,
- })),
- toolResults: result?.toolResults
- ? result.toolResults.map((tr) => ({
- toolName: tr.toolName || "unknown",
- status: (tr.status === "error"
- ? "failure"
- : "success"),
- result: tr.result,
- error: tr.error,
- }))
- : undefined,
- };
+ })(),
+ usage: result?.usage,
+ provider: result?.provider,
+ model: result?.model,
+ toolCalls: result?.toolCalls?.map((call) => ({
+ toolName: call.toolName,
+ parameters: call.args,
+ id: call.toolCallId,
+ })),
+ toolResults: result?.toolResults
+ ? result.toolResults.map((tr) => ({
+ toolName: tr.toolName || "unknown",
+ status: (tr.status === "error"
+ ? "failure"
+ : "success"),
+ result: tr.result,
+ error: tr.error,
+ }))
+ : undefined,
+ // 🔧 FIX: Include analytics and evaluation from generate result
+ analytics: result?.analytics,
+ evaluation: result?.evaluation,
+ };
+ }
+ catch (error) {
+ logger.error(`Fake streaming fallback failed for ${this.providerName}:`, error);
+ throw this.handleProviderError(error);
+ }
  }
- catch (error) {
- logger.error(`Stream with tools failed for ${this.providerName}:`, error);
- throw this.handleProviderError(error);
+ else {
+ // If real streaming failed and no tools are enabled, re-throw the original error
+ logger.error(`Real streaming failed for ${this.providerName}:`, realStreamError);
+ throw this.handleProviderError(realStreamError);
  }
  }
- // Traditional streaming without tools
- try {
- return await this.executeStream(options, analysisSchema);
- }
- catch (error) {
- logger.error(`Stream failed for ${this.providerName}:`, error);
- throw this.handleProviderError(error);
- }
  }
  /**
  * Text generation method - implements AIProvider interface
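
For orientation, the object returned by the rewritten stream() can be consumed as sketched below. This is a minimal sketch, not part of the diff: the provider instance and prompt are illustrative, and fields such as usage, toolCalls, analytics and evaluation are only guaranteed on the fake-streaming fallback path shown above.

// Illustrative consumption sketch (assumes `provider` is some concrete BaseProvider subclass)
async function consumeStream(provider: BaseProvider): Promise<string> {
  const result = await provider.stream({
    input: { text: "Explain the 7.2.0 streaming changes" }, // hypothetical prompt
    enableAnalytics: true, // forwarded into generate() by the fallback path
  });
  let full = "";
  for await (const chunk of result.stream) {
    full += chunk.content; // each chunk is { content: string } on both paths
  }
  // usage/analytics may be undefined when the real-streaming path succeeds
  console.log(result.provider, result.model, result.usage, result.analytics);
  return full;
}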
@@ -150,6 +167,31 @@ export class BaseProvider {
  temperature: options.temperature,
  maxTokens: options.maxTokens || 8192,
  });
+ // Extract tool names from tool calls for tracking
+ // AI SDK puts tool calls in steps array for multi-step generation
+ const toolsUsed = [];
+ // First check direct tool calls (fallback)
+ if (result.toolCalls && result.toolCalls.length > 0) {
+ toolsUsed.push(...result.toolCalls.map((tc) => {
+ return (tc.toolName ||
+ tc.name ||
+ "unknown");
+ }));
+ }
+ // Then check steps for tool calls (primary source for multi-step)
+ if (result.steps &&
+ Array.isArray(result.steps)) {
+ for (const step of result.steps ||
+ []) {
+ if (step?.toolCalls && Array.isArray(step.toolCalls)) {
+ toolsUsed.push(...step.toolCalls.map((tc) => {
+ return tc.toolName || tc.name || "unknown";
+ }));
+ }
+ }
+ }
+ // Remove duplicates
+ const uniqueToolsUsed = [...new Set(toolsUsed)];
  // Format the result with tool executions included
  const enhancedResult = {
  content: result.text,
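
To make the extraction above concrete, here is the kind of multi-step result it is written to handle. The object below is a hypothetical example of an AI SDK generateText result with a steps array; only toolName/name are actually read.

// Hypothetical multi-step result: two steps, one tool used twice
const exampleResult = {
  toolCalls: [],
  steps: [
    { toolCalls: [{ toolName: "searchDocs" }, { toolName: "readFile" }] },
    { toolCalls: [{ toolName: "searchDocs" }] },
  ],
};
// The loops above would collect ["searchDocs", "readFile", "searchDocs"]
// and de-duplicate to uniqueToolsUsed = ["searchDocs", "readFile"]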
@@ -174,6 +216,7 @@ export class BaseProvider {
  }))
  : [],
  toolResults: result.toolResults,
+ toolsUsed: uniqueToolsUsed,
  };
  // Enhanced result with analytics and evaluation
  return await this.enhanceResult(enhancedResult, options, startTime);
@@ -348,9 +391,43 @@ export class BaseProvider {
  return evaluation;
  }
  validateOptions(options) {
+ // 🔧 EDGE CASE: Basic prompt validation
  if (!options.prompt || options.prompt.trim().length === 0) {
  throw new Error("Prompt is required and cannot be empty");
  }
+ // 🔧 EDGE CASE: Handle very large prompts (>1M characters)
+ if (options.prompt.length > SYSTEM_LIMITS.MAX_PROMPT_LENGTH) {
+ throw new Error(`Prompt too large: ${options.prompt.length} characters (max: ${SYSTEM_LIMITS.MAX_PROMPT_LENGTH}). Consider breaking into smaller chunks. Use BaseProvider.chunkPrompt(prompt, maxSize, overlap) static method for chunking.`);
+ }
+ // 🔧 EDGE CASE: Validate token limits
+ if (options.maxTokens && options.maxTokens > 200000) {
+ throw new Error(`Max tokens too high: ${options.maxTokens} (recommended max: 200,000). This may cause timeouts or API errors.`);
+ }
+ if (options.maxTokens && options.maxTokens < 1) {
+ throw new Error("Max tokens must be at least 1");
+ }
+ // 🔧 EDGE CASE: Validate temperature range
+ if (options.temperature !== undefined) {
+ if (options.temperature < 0 || options.temperature > 2) {
+ throw new Error(`Temperature must be between 0 and 2, got: ${options.temperature}`);
+ }
+ }
+ // 🔧 EDGE CASE: Validate timeout values
+ if (options.timeout !== undefined) {
+ const timeoutMs = typeof options.timeout === "string"
+ ? parseInt(options.timeout, 10)
+ : options.timeout;
+ if (isNaN(timeoutMs) || timeoutMs < 1000) {
+ throw new Error(`Timeout must be at least 1000ms (1 second), got: ${options.timeout}`);
+ }
+ if (timeoutMs > SYSTEM_LIMITS.LONG_TIMEOUT_WARNING) {
+ logger.warn(`⚠️ Very long timeout: ${timeoutMs}ms. This may cause the CLI to hang.`);
+ }
+ }
+ // 🔧 EDGE CASE: Validate maxSteps for tool execution
+ if (options.maxSteps !== undefined && options.maxSteps > 20) {
+ throw new Error(`Max steps too high: ${options.maxSteps} (recommended max: 20). This may cause long execution times.`);
+ }
  }
  getProviderInfo() {
  return {
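
A rough illustration of how the new validation behaves. validateOptions is normally called internally before generation; the values below are hypothetical, and each invalid case throws, so the checks are wrapped to run independently.

// Given some BaseProvider subclass instance `provider`:
function tryValidate(provider: BaseProvider, options: Record<string, unknown>, label: string): void {
  try {
    provider.validateOptions(options);
    console.log(label, "accepted");
  } catch (err) {
    console.log(label, "rejected:", (err as Error).message);
  }
}
tryValidate(provider, { prompt: "hi", temperature: 1.2 }, "temperature 1.2");   // accepted (0-2 range)
tryValidate(provider, { prompt: "hi", temperature: 3 }, "temperature 3");       // rejected
tryValidate(provider, { prompt: "hi", maxTokens: 500000 }, "maxTokens 500000"); // rejected (> 200,000)
tryValidate(provider, { prompt: "hi", timeout: "500" }, "timeout 500");         // rejected (< 1000 ms floor)
tryValidate(provider, { prompt: "x".repeat(2_000_000) }, "2M-char prompt");     // rejected (> MAX_PROMPT_LENGTH)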
@@ -382,4 +459,36 @@ export class BaseProvider {
  }
  return this.defaultTimeout;
  }
+ /**
+ * Utility method to chunk large prompts into smaller pieces
+ * @param prompt The prompt to chunk
+ * @param maxChunkSize Maximum size per chunk (default: 900,000 characters)
+ * @param overlap Overlap between chunks to maintain context (default: 100 characters)
+ * @returns Array of prompt chunks
+ */
+ static chunkPrompt(prompt, maxChunkSize = 900000, overlap = 100) {
+ if (prompt.length <= maxChunkSize) {
+ return [prompt];
+ }
+ const chunks = [];
+ let start = 0;
+ while (start < prompt.length) {
+ const end = Math.min(start + maxChunkSize, prompt.length);
+ chunks.push(prompt.slice(start, end));
+ // Break if we've reached the end
+ if (end >= prompt.length) {
+ break;
+ }
+ // Move start forward, accounting for overlap
+ const nextStart = end - overlap;
+ // Ensure we make progress (avoid infinite loops)
+ if (nextStart <= start) {
+ start = end;
+ }
+ else {
+ start = Math.max(nextStart, 0);
+ }
+ }
+ return chunks;
+ }
  }
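
A short usage sketch for the new static helper; the sizes below are illustrative:

// Split an oversized prompt into overlapping chunks and send them one by one
const hugePrompt = "x".repeat(1_500_000); // above MAX_PROMPT_LENGTH (1,000,000 chars)
const chunks = BaseProvider.chunkPrompt(hugePrompt, 900_000, 100);
console.log(chunks.length); // 2 — consecutive chunks share a 100-character overlap
for (const chunk of chunks) {
  // each chunk now passes the MAX_PROMPT_LENGTH check in validateOptions()
  // await provider.generate({ prompt: chunk });
}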
@@ -36,6 +36,17 @@ export declare const CLI_LIMITS: {
  default: number;
  };
  };
+ export declare const SYSTEM_LIMITS: {
+ MAX_PROMPT_LENGTH: number;
+ HIGH_MEMORY_THRESHOLD: number;
+ LONG_TIMEOUT_WARNING: number;
+ DEFAULT_CONCURRENCY_LIMIT: number;
+ MAX_CONCURRENCY_LIMIT: number;
+ DEFAULT_RETRY_ATTEMPTS: number;
+ DEFAULT_INITIAL_DELAY: number;
+ DEFAULT_MAX_DELAY: number;
+ DEFAULT_BACKOFF_MULTIPLIER: number;
+ };
  export declare const ENV_DEFAULTS: {
  maxTokens: number;
  temperature: number;
@@ -40,6 +40,23 @@ export const CLI_LIMITS = {
  default: DEFAULT_TEMPERATURE,
  },
  };
+ // Performance and System Limits
+ export const SYSTEM_LIMITS = {
+ // Prompt size limits (baseProvider.ts magic number fix)
+ MAX_PROMPT_LENGTH: 1000000, // 1M characters - prevents memory issues
+ // Memory monitoring thresholds (performance.ts)
+ HIGH_MEMORY_THRESHOLD: 100, // MB - when to warn about memory usage
+ // Timeout warnings (baseProvider.ts)
+ LONG_TIMEOUT_WARNING: 300000, // 5 minutes - when to warn about long timeouts
+ // Concurrency control (neurolink.ts provider testing)
+ DEFAULT_CONCURRENCY_LIMIT: 3, // Max parallel provider tests
+ MAX_CONCURRENCY_LIMIT: 5, // Upper bound for concurrency
+ // Retry system defaults (retryHandler.ts)
+ DEFAULT_RETRY_ATTEMPTS: 3,
+ DEFAULT_INITIAL_DELAY: 1000, // 1 second
+ DEFAULT_MAX_DELAY: 30000, // 30 seconds
+ DEFAULT_BACKOFF_MULTIPLIER: 2,
+ };
  // Environment Variable Support (for future use)
  export const ENV_DEFAULTS = {
  maxTokens: process.env.NEUROLINK_DEFAULT_MAX_TOKENS
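
The retry defaults above imply the delay schedule sketched below. retryHandler.js (also new in this release) is not shown in this excerpt, so this is only an illustration of how the constants combine, not the library's actual implementation:

import { SYSTEM_LIMITS } from "./constants.js"; // assumed import path
// Delay before retry attempt n (1-based), doubled each time and capped at DEFAULT_MAX_DELAY
function backoffDelay(attempt: number): number {
  const raw = SYSTEM_LIMITS.DEFAULT_INITIAL_DELAY *
    Math.pow(SYSTEM_LIMITS.DEFAULT_BACKOFF_MULTIPLIER, attempt - 1);
  return Math.min(raw, SYSTEM_LIMITS.DEFAULT_MAX_DELAY);
}
// With DEFAULT_RETRY_ATTEMPTS = 3: waits of 1000 ms, 2000 ms and 4000 ms (all under the 30,000 ms cap)
[1, 2, 3].forEach((n) => console.log(`attempt ${n}: wait ${backoffDelay(n)} ms`));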
@@ -84,24 +84,32 @@ function parseUnifiedEvaluationResult(response, context) {
  accuracy: /accuracy[:\s]*([0-9]+(?:\.[0-9]+)?)/i,
  completeness: /completeness[:\s]*([0-9]+(?:\.[0-9]+)?)/i,
  overall: /overall[:\s]*([0-9]+(?:\.[0-9]+)?)/i,
+ reasoning: /reasoning[:\s]*(.+?)(?=\n\s*\w+:|\n\s*$|$)/is,
  };
  for (const [key, pattern] of Object.entries(patterns)) {
  const match = response.match(pattern);
  if (match) {
- const value = parseFloat(match[1]);
- if (value >= 1 && value <= 10) {
- const roundedValue = Math.round(value);
- if (key === "relevance") {
- result.relevance = roundedValue;
- }
- else if (key === "accuracy") {
- result.accuracy = roundedValue;
- }
- else if (key === "completeness") {
- result.completeness = roundedValue;
- }
- else if (key === "overall") {
- result.overall = roundedValue;
+ if (key === "reasoning") {
+ // Extract reasoning text
+ result.reasoning = match[1].trim();
+ }
+ else {
+ // Extract numerical scores
+ const value = parseFloat(match[1]);
+ if (value >= 1 && value <= 10) {
+ const roundedValue = Math.round(value);
+ if (key === "relevance") {
+ result.relevance = roundedValue;
+ }
+ else if (key === "accuracy") {
+ result.accuracy = roundedValue;
+ }
+ else if (key === "completeness") {
+ result.completeness = roundedValue;
+ }
+ else if (key === "overall") {
+ result.overall = roundedValue;
+ }
  }
  }
  }
@@ -112,6 +120,7 @@ function parseUnifiedEvaluationResult(response, context) {
  accuracy: result.accuracy || 1,
  completeness: result.completeness || 1,
  overall: result.overall || 1,
+ reasoning: result.reasoning || "No detailed reasoning provided",
  };
  }
  catch (error) {
@@ -123,6 +132,7 @@ function parseUnifiedEvaluationResult(response, context) {
  accuracy: 1,
  completeness: 1,
  overall: 1,
+ reasoning: "Error occurred during evaluation parsing",
  };
  }
  }
@@ -167,6 +177,7 @@ Relevance: [score]
  Accuracy: [score]
  Completeness: [score]
  Overall: [score]
+ Reasoning: [Provide a detailed explanation of your evaluation, explaining why you gave these scores. Include specific observations about the response's strengths and any areas for improvement.]
  `;
  // Generate evaluation
  const result = await provider.generate(prompt);
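
Given this prompt format and the regexes added earlier, a typical evaluator reply would be parsed roughly as follows (the reply text is invented for illustration):

// Hypothetical evaluator reply
const response = `
Relevance: 9
Accuracy: 8
Completeness: 7
Overall: 8
Reasoning: Directly answers the question and cites the right API, but omits the new timeout validation.
`;
// parseUnifiedEvaluationResult(response, context) would yield approximately:
// {
//   relevance: 9, accuracy: 8, completeness: 7, overall: 8,
//   reasoning: "Directly answers the question and cites the right API, but omits the new timeout validation."
// }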
@@ -1,6 +1,7 @@
  // ✅ CIRCULAR DEPENDENCY FIX: Remove barrel export import
  // Providers are now managed via ProviderFactory instead of direct imports
  import { ProviderFactory } from "../factories/providerFactory.js";
+ import { ProviderRegistry } from "../factories/providerRegistry.js";
  import { getBestProvider } from "../utils/providerUtils.js";
  import { logger } from "../utils/logger.js";
  const componentIdentifier = "aiProviderFactory";
@@ -36,24 +37,25 @@ export class AIProviderFactory {
  enableMCP,
  });
  try {
- // EMERGENCY FIX: Skip dynamic model provider initialization to prevent hanging
- // TODO: Fix the hanging dynamic model provider.initialize()
- // Initialize dynamic model provider if not already done
- // try {
- // if (dynamicModelProvider.needsRefresh()) {
- // // Add timeout to prevent hanging
- // await Promise.race([
- // dynamicModelProvider.initialize(),
- // new Promise((_, reject) =>
- // setTimeout(() => reject(new Error('Dynamic model provider timeout')), 3000)
- // )
- // ]);
- // }
- // } catch (dynamicError) {
- // logger.warn(`[${functionTag}] Dynamic model provider initialization failed, using fallback`, {
- // error: dynamicError instanceof Error ? dynamicError.message : String(dynamicError),
- // });
- // }
+ // DYNAMIC MODEL PROVIDER STATUS (2025): Disabled due to reliability issues
+ //
+ // Root Cause: Dynamic model provider initialization can hang when:
+ // - Local model server (localhost:3001) is not running or responding
+ // - GitHub raw URL requests timeout due to network issues
+ // - Local config file doesn't exist
+ //
+ // Current Behavior: Static model resolution works reliably
+ // Impact: No functionality loss - providers use built-in model defaults
+ //
+ // Implementation Requirements (if re-enabling):
+ // 1. Add robust timeout handling (3s max per source)
+ // 2. Implement exponential backoff for network requests
+ // 3. Add graceful degradation when all sources fail
+ // 4. Create health check for localhost:3001 before attempting connection
+ // 5. Add comprehensive error handling and logging
+ //
+ // Until these improvements are implemented, dynamic model provider remains disabled
+ // for system reliability. Static model defaults provide stable functionality.
  // COMPREHENSIVE FIX: Disable dynamic model resolution completely until provider is fixed
  // This prevents stale gemini-1.5-pro-latest from overriding correct gemini-2.5-pro defaults
  const resolvedModelName = modelName;
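
For reference, requirement 1 amounts to putting a hard bound on each source lookup, much like the Promise.race in the code removed above. A hedged sketch follows; dynamicModelProvider's real API is not shown in this excerpt, so the commented call is an assumption based on the removed code:

// Sketch: bound any single model-source lookup to 3 seconds (requirement 1)
async function withTimeout<T>(work: Promise<T>, ms: number, label: string): Promise<T> {
  let timer: ReturnType<typeof setTimeout> | undefined;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(`${label} timed out after ${ms}ms`)), ms);
  });
  try {
    return await Promise.race([work, timeout]);
  } finally {
    if (timer) {
      clearTimeout(timer);
    }
  }
}
// e.g. await withTimeout(dynamicModelProvider.initialize(), 3000, "dynamic model provider");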
@@ -87,6 +89,8 @@ export class AIProviderFactory {
  // );
  // }
  // }
+ // CRITICAL FIX: Initialize providers before using them
+ await ProviderRegistry.registerAllProviders();
  // PURE FACTORY PATTERN: No switch statements - use ProviderFactory exclusively
  const normalizedName = this.normalizeProviderName(providerName);
  const finalModelName = resolvedModelName === "default" || resolvedModelName === null
@@ -0,0 +1,65 @@
+ import type { AnalyticsData } from "./types.js";
+ import type { TokenUsage } from "../types/providers.js";
+ import type { ToolCall, ToolResult } from "../types/streamTypes.js";
+ /**
+ * Stream analytics result from Vercel AI SDK streamText
+ */
+ export interface StreamTextResult {
+ textStream: AsyncIterable<string>;
+ text: Promise<string>;
+ usage: Promise<{
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ } | undefined>;
+ response: Promise<{
+ id?: string;
+ model?: string;
+ timestamp?: number | Date;
+ } | undefined>;
+ finishReason: Promise<"stop" | "length" | "content-filter" | "tool-calls" | "error" | "other" | "unknown">;
+ toolResults?: Promise<ToolResult[]>;
+ toolCalls?: Promise<ToolCall[]>;
+ }
+ /**
+ * Interface for collecting analytics from streaming results
+ */
+ export interface StreamAnalyticsCollector {
+ collectUsage(result: StreamTextResult): Promise<TokenUsage>;
+ collectMetadata(result: StreamTextResult): Promise<ResponseMetadata>;
+ createAnalytics(provider: string, model: string, result: StreamTextResult, responseTime: number, metadata?: Record<string, unknown>): Promise<AnalyticsData>;
+ }
+ /**
+ * Response metadata from stream result
+ */
+ export interface ResponseMetadata {
+ id?: string;
+ model?: string;
+ timestamp?: number;
+ finishReason?: string;
+ }
+ /**
+ * Base implementation for collecting analytics from Vercel AI SDK stream results
+ */
+ export declare class BaseStreamAnalyticsCollector implements StreamAnalyticsCollector {
+ /**
+ * Collect token usage from stream result
+ */
+ collectUsage(result: StreamTextResult): Promise<TokenUsage>;
+ /**
+ * Collect response metadata from stream result
+ */
+ collectMetadata(result: StreamTextResult): Promise<ResponseMetadata>;
+ /**
+ * Create comprehensive analytics from stream result
+ */
+ createAnalytics(provider: string, model: string, result: StreamTextResult, responseTime: number, metadata?: Record<string, unknown>): Promise<AnalyticsData>;
+ /**
+ * Clean up resources and force garbage collection if needed
+ */
+ cleanup(): void;
+ }
+ /**
+ * Global instance of stream analytics collector
+ */
+ export declare const streamAnalyticsCollector: BaseStreamAnalyticsCollector;
@@ -0,0 +1,125 @@
+ import { createAnalytics } from "./analytics.js";
+ import { logger } from "../utils/logger.js";
+ /**
+ * Base implementation for collecting analytics from Vercel AI SDK stream results
+ */
+ export class BaseStreamAnalyticsCollector {
+ /**
+ * Collect token usage from stream result
+ */
+ async collectUsage(result) {
+ try {
+ const usage = await result.usage;
+ if (!usage) {
+ logger.debug("No usage data available from stream result");
+ return {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ };
+ }
+ return {
+ inputTokens: usage.promptTokens || 0,
+ outputTokens: usage.completionTokens || 0,
+ totalTokens: usage.totalTokens ||
+ (usage.promptTokens || 0) + (usage.completionTokens || 0),
+ };
+ }
+ catch (error) {
+ logger.warn("Failed to collect usage from stream result", { error });
+ return {
+ inputTokens: 0,
+ outputTokens: 0,
+ totalTokens: 0,
+ };
+ }
+ }
+ /**
+ * Collect response metadata from stream result
+ */
+ async collectMetadata(result) {
+ try {
+ const [response, finishReason] = await Promise.all([
+ result.response,
+ result.finishReason,
+ ]);
+ return {
+ id: response?.id,
+ model: response?.model,
+ timestamp: response?.timestamp instanceof Date
+ ? response.timestamp.getTime()
+ : response?.timestamp || Date.now(),
+ finishReason: finishReason,
+ };
+ }
+ catch (error) {
+ logger.warn("Failed to collect metadata from stream result", { error });
+ const finishReason = await result.finishReason.catch(() => "error");
+ return {
+ timestamp: Date.now(),
+ finishReason: finishReason,
+ };
+ }
+ }
+ /**
+ * Create comprehensive analytics from stream result
+ */
+ async createAnalytics(provider, model, result, responseTime, metadata) {
+ try {
+ // Collect analytics data in parallel
+ const [usage, responseMetadata] = await Promise.all([
+ this.collectUsage(result),
+ this.collectMetadata(result),
+ ]);
+ // Get final text content and finish reason
+ const [content, finishReason, toolResults, toolCalls] = await Promise.all([
+ result.text,
+ result.finishReason,
+ result.toolResults || Promise.resolve([]),
+ result.toolCalls || Promise.resolve([]),
+ ]);
+ // Create comprehensive analytics
+ return createAnalytics(provider, model, {
+ usage,
+ content,
+ response: responseMetadata,
+ finishReason: finishReason,
+ toolResults: toolResults,
+ toolCalls: toolCalls,
+ }, responseTime, {
+ ...metadata,
+ streamingMode: true,
+ responseId: responseMetadata.id,
+ finishReason: finishReason,
+ });
+ }
+ catch (error) {
+ logger.error("Failed to create analytics from stream result", {
+ provider,
+ model,
+ error: error instanceof Error ? error.message : String(error),
+ });
+ // Return minimal analytics on error
+ return createAnalytics(provider, model, { usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 } }, responseTime, {
+ ...metadata,
+ streamingMode: true,
+ analyticsError: true,
+ });
+ }
+ }
+ /**
+ * Clean up resources and force garbage collection if needed
+ */
+ cleanup() {
+ // Only force garbage collection if memory usage exceeds 500 MB
+ const heapUsed = process.memoryUsage().heapUsed;
+ const GC_THRESHOLD = 500 * 1024 * 1024; // 500 MB
+ if (typeof global !== "undefined" && global.gc && heapUsed > GC_THRESHOLD) {
+ global.gc();
+ }
+ }
+ }
+ /**
+ * Global instance of stream analytics collector
+ */
+ export const streamAnalyticsCollector = new BaseStreamAnalyticsCollector();
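
A usage sketch for the new collector, wired to a Vercel AI SDK streamText result. The model id, prompt, and import paths are illustrative, and the exact streamText call shape depends on the installed ai SDK version; the StreamTextResult interface declared above mirrors the shape this collector expects:

import { streamText } from "ai";
import { openai } from "@ai-sdk/openai";
import { streamAnalyticsCollector } from "@juspay/neurolink/dist/core/streamAnalytics.js"; // assumed deep-import path

async function streamWithAnalytics(): Promise<void> {
  const started = Date.now();
  const result = await streamText({ model: openai("gpt-4o-mini"), prompt: "Hello" }); // illustrative model
  for await (const delta of result.textStream) {
    process.stdout.write(delta);
  }
  // usage, response and finishReason resolve once the stream has finished
  const analytics = await streamAnalyticsCollector.createAnalytics(
    "openai", "gpt-4o-mini", result, Date.now() - started, { requestId: "example" });
  console.log(analytics);
  streamAnalyticsCollector.cleanup();
}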