@juspay/neurolink 7.0.0 → 7.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/CHANGELOG.md +15 -4
  2. package/README.md +16 -11
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/cli/commands/config.js +22 -21
  5. package/dist/cli/commands/mcp.d.ts +79 -0
  6. package/dist/cli/commands/mcp.js +916 -0
  7. package/dist/cli/commands/models.d.ts +63 -0
  8. package/dist/cli/commands/models.js +653 -0
  9. package/dist/cli/commands/ollama.js +56 -55
  10. package/dist/cli/factories/commandFactory.d.ts +67 -2
  11. package/dist/cli/factories/commandFactory.js +840 -92
  12. package/dist/cli/index.d.ts +6 -0
  13. package/dist/cli/index.js +42 -999
  14. package/dist/cli/utils/completeSetup.js +9 -8
  15. package/dist/cli/utils/envManager.js +7 -6
  16. package/dist/cli/utils/interactiveSetup.js +20 -19
  17. package/dist/core/analytics.js +25 -38
  18. package/dist/core/baseProvider.d.ts +8 -0
  19. package/dist/core/baseProvider.js +177 -68
  20. package/dist/core/constants.d.ts +11 -0
  21. package/dist/core/constants.js +17 -0
  22. package/dist/core/evaluation.js +25 -14
  23. package/dist/core/factory.js +21 -18
  24. package/dist/core/streamAnalytics.d.ts +65 -0
  25. package/dist/core/streamAnalytics.js +125 -0
  26. package/dist/factories/providerRegistry.js +3 -1
  27. package/dist/lib/core/analytics.js +25 -38
  28. package/dist/lib/core/baseProvider.d.ts +8 -0
  29. package/dist/lib/core/baseProvider.js +177 -68
  30. package/dist/lib/core/constants.d.ts +11 -0
  31. package/dist/lib/core/constants.js +17 -0
  32. package/dist/lib/core/evaluation.js +25 -14
  33. package/dist/lib/core/factory.js +22 -18
  34. package/dist/lib/core/streamAnalytics.d.ts +65 -0
  35. package/dist/lib/core/streamAnalytics.js +125 -0
  36. package/dist/lib/factories/providerRegistry.js +3 -1
  37. package/dist/lib/mcp/toolRegistry.d.ts +5 -0
  38. package/dist/lib/mcp/toolRegistry.js +60 -0
  39. package/dist/lib/models/modelRegistry.d.ts +132 -0
  40. package/dist/lib/models/modelRegistry.js +483 -0
  41. package/dist/lib/models/modelResolver.d.ts +115 -0
  42. package/dist/lib/models/modelResolver.js +467 -0
  43. package/dist/lib/neurolink.d.ts +4 -1
  44. package/dist/lib/neurolink.js +108 -69
  45. package/dist/lib/providers/anthropic.js +3 -0
  46. package/dist/lib/providers/googleAiStudio.js +13 -0
  47. package/dist/lib/providers/huggingFace.js +15 -3
  48. package/dist/lib/providers/mistral.js +19 -7
  49. package/dist/lib/providers/ollama.js +31 -7
  50. package/dist/lib/providers/openAI.js +12 -0
  51. package/dist/lib/sdk/toolRegistration.js +17 -0
  52. package/dist/lib/types/cli.d.ts +56 -1
  53. package/dist/lib/types/contextTypes.d.ts +110 -0
  54. package/dist/lib/types/contextTypes.js +176 -0
  55. package/dist/lib/types/index.d.ts +4 -1
  56. package/dist/lib/types/mcpTypes.d.ts +118 -7
  57. package/dist/lib/types/providers.d.ts +81 -0
  58. package/dist/lib/types/streamTypes.d.ts +44 -7
  59. package/dist/lib/types/tools.d.ts +9 -0
  60. package/dist/lib/types/universalProviderOptions.d.ts +3 -1
  61. package/dist/lib/types/universalProviderOptions.js +2 -1
  62. package/dist/lib/utils/logger.d.ts +7 -0
  63. package/dist/lib/utils/logger.js +16 -6
  64. package/dist/lib/utils/performance.d.ts +105 -0
  65. package/dist/lib/utils/performance.js +210 -0
  66. package/dist/lib/utils/providerUtils.js +9 -2
  67. package/dist/lib/utils/retryHandler.d.ts +89 -0
  68. package/dist/lib/utils/retryHandler.js +269 -0
  69. package/dist/mcp/toolRegistry.d.ts +5 -0
  70. package/dist/mcp/toolRegistry.js +60 -0
  71. package/dist/models/modelRegistry.d.ts +132 -0
  72. package/dist/models/modelRegistry.js +483 -0
  73. package/dist/models/modelResolver.d.ts +115 -0
  74. package/dist/models/modelResolver.js +468 -0
  75. package/dist/neurolink.d.ts +4 -1
  76. package/dist/neurolink.js +108 -69
  77. package/dist/providers/anthropic.js +3 -0
  78. package/dist/providers/googleAiStudio.js +13 -0
  79. package/dist/providers/huggingFace.js +15 -3
  80. package/dist/providers/mistral.js +19 -7
  81. package/dist/providers/ollama.js +31 -7
  82. package/dist/providers/openAI.js +12 -0
  83. package/dist/sdk/toolRegistration.js +17 -0
  84. package/dist/types/cli.d.ts +56 -1
  85. package/dist/types/contextTypes.d.ts +110 -0
  86. package/dist/types/contextTypes.js +177 -0
  87. package/dist/types/index.d.ts +4 -1
  88. package/dist/types/mcpTypes.d.ts +118 -7
  89. package/dist/types/providers.d.ts +81 -0
  90. package/dist/types/streamTypes.d.ts +44 -7
  91. package/dist/types/tools.d.ts +9 -0
  92. package/dist/types/universalProviderOptions.d.ts +3 -1
  93. package/dist/types/universalProviderOptions.js +3 -1
  94. package/dist/utils/logger.d.ts +7 -0
  95. package/dist/utils/logger.js +16 -6
  96. package/dist/utils/performance.d.ts +105 -0
  97. package/dist/utils/performance.js +210 -0
  98. package/dist/utils/providerUtils.js +9 -2
  99. package/dist/utils/retryHandler.d.ts +89 -0
  100. package/dist/utils/retryHandler.js +269 -0
  101. package/package.json +2 -1
@@ -84,24 +84,32 @@ function parseUnifiedEvaluationResult(response, context) {
         accuracy: /accuracy[:\s]*([0-9]+(?:\.[0-9]+)?)/i,
         completeness: /completeness[:\s]*([0-9]+(?:\.[0-9]+)?)/i,
         overall: /overall[:\s]*([0-9]+(?:\.[0-9]+)?)/i,
+        reasoning: /reasoning[:\s]*(.+?)(?=\n\s*\w+:|\n\s*$|$)/is,
     };
     for (const [key, pattern] of Object.entries(patterns)) {
         const match = response.match(pattern);
         if (match) {
-            const value = parseFloat(match[1]);
-            if (value >= 1 && value <= 10) {
-                const roundedValue = Math.round(value);
-                if (key === "relevance") {
-                    result.relevance = roundedValue;
-                }
-                else if (key === "accuracy") {
-                    result.accuracy = roundedValue;
-                }
-                else if (key === "completeness") {
-                    result.completeness = roundedValue;
-                }
-                else if (key === "overall") {
-                    result.overall = roundedValue;
+            if (key === "reasoning") {
+                // Extract reasoning text
+                result.reasoning = match[1].trim();
+            }
+            else {
+                // Extract numerical scores
+                const value = parseFloat(match[1]);
+                if (value >= 1 && value <= 10) {
+                    const roundedValue = Math.round(value);
+                    if (key === "relevance") {
+                        result.relevance = roundedValue;
+                    }
+                    else if (key === "accuracy") {
+                        result.accuracy = roundedValue;
+                    }
+                    else if (key === "completeness") {
+                        result.completeness = roundedValue;
+                    }
+                    else if (key === "overall") {
+                        result.overall = roundedValue;
+                    }
                 }
             }
         }
@@ -112,6 +120,7 @@ function parseUnifiedEvaluationResult(response, context) {
             accuracy: result.accuracy || 1,
             completeness: result.completeness || 1,
             overall: result.overall || 1,
+            reasoning: result.reasoning || "No detailed reasoning provided",
         };
     }
     catch (error) {
@@ -123,6 +132,7 @@ function parseUnifiedEvaluationResult(response, context) {
             accuracy: 1,
             completeness: 1,
             overall: 1,
+            reasoning: "Error occurred during evaluation parsing",
         };
     }
 }
@@ -167,6 +177,7 @@ Relevance: [score]
 Accuracy: [score]
 Completeness: [score]
 Overall: [score]
+Reasoning: [Provide a detailed explanation of your evaluation, explaining why you gave these scores. Include specific observations about the response's strengths and any areas for improvement.]
 `;
     // Generate evaluation
     const result = await provider.generate(prompt);
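Note on the new Reasoning: field above: the added regex captures free-form text lazily, stopping at the next `Label:` line or at end of input, so it works whether Reasoning appears mid-response or last. A minimal sketch of it in action (the sample response text is fabricated for illustration):

    // The pattern added in this diff, applied to a fabricated evaluation response.
    const reasoningPattern = /reasoning[:\s]*(.+?)(?=\n\s*\w+:|\n\s*$|$)/is;
    const sample = [
        "Relevance: 9",
        "Accuracy: 8",
        "Completeness: 7",
        "Overall: 8",
        "Reasoning: Directly answers the question and cites correct details.",
    ].join("\n");
    const match = sample.match(reasoningPattern);
    console.log(match?.[1].trim());
    // => "Directly answers the question and cites correct details."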
@@ -38,24 +38,25 @@ export class AIProviderFactory {
             enableMCP,
         });
         try {
-            // EMERGENCY FIX: Skip dynamic model provider initialization to prevent hanging
-            // TODO: Fix the hanging dynamic model provider.initialize()
-            // Initialize dynamic model provider if not already done
-            // try {
-            //     if (dynamicModelProvider.needsRefresh()) {
-            //         // Add timeout to prevent hanging
-            //         await Promise.race([
-            //             dynamicModelProvider.initialize(),
-            //             new Promise((_, reject) =>
-            //                 setTimeout(() => reject(new Error('Dynamic model provider timeout')), 3000)
-            //             )
-            //         ]);
-            //     }
-            // } catch (dynamicError) {
-            //     logger.warn(`[${functionTag}] Dynamic model provider initialization failed, using fallback`, {
-            //         error: dynamicError instanceof Error ? dynamicError.message : String(dynamicError),
-            //     });
-            // }
+            // DYNAMIC MODEL PROVIDER STATUS (2025): Disabled due to reliability issues
+            //
+            // Root Cause: Dynamic model provider initialization can hang when:
+            // - Local model server (localhost:3001) is not running or responding
+            // - GitHub raw URL requests timeout due to network issues
+            // - Local config file doesn't exist
+            //
+            // Current Behavior: Static model resolution works reliably
+            // Impact: No functionality loss - providers use built-in model defaults
+            //
+            // Implementation Requirements (if re-enabling):
+            // 1. Add robust timeout handling (3s max per source)
+            // 2. Implement exponential backoff for network requests
+            // 3. Add graceful degradation when all sources fail
+            // 4. Create health check for localhost:3001 before attempting connection
+            // 5. Add comprehensive error handling and logging
+            //
+            // Until these improvements are implemented, dynamic model provider remains disabled
+            // for system reliability. Static model defaults provide stable functionality.
             // COMPREHENSIVE FIX: Disable dynamic model resolution completely until provider is fixed
             // This prevents stale gemini-1.5-pro-latest from overriding correct gemini-2.5-pro defaults
             const resolvedModelName = modelName;
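Requirement 1 in the comment above (a hard 3-second cap per source) is also the piece the removed commented-out code got wrong: its Promise.race left the rejection timer running after the work settled. A sketch of a leak-free version, assuming a hypothetical initialize() call on the dynamic model provider rather than any current neurolink API:

    // Illustrative only: cap a single source at `ms` and always clear the timer.
    async function withTimeout<T>(work: Promise<T>, ms: number): Promise<T> {
        let timer: ReturnType<typeof setTimeout> | undefined;
        const timeout = new Promise<never>((_, reject) => {
            timer = setTimeout(() => reject(new Error(`timed out after ${ms}ms`)), ms);
        });
        try {
            return await Promise.race([work, timeout]);
        } finally {
            clearTimeout(timer); // the old pattern skipped this cleanup
        }
    }
    // e.g. await withTimeout(dynamicModelProvider.initialize(), 3000);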
@@ -89,6 +90,8 @@ export class AIProviderFactory {
             // );
             // }
             // }
+            // CRITICAL FIX: Initialize providers before using them
+            await ProviderRegistry.registerAllProviders();
             // PURE FACTORY PATTERN: No switch statements - use ProviderFactory exclusively
             const normalizedName = this.normalizeProviderName(providerName);
             const finalModelName = resolvedModelName === "default" || resolvedModelName === null
@@ -0,0 +1,65 @@
+import type { AnalyticsData } from "./types.js";
+import type { TokenUsage } from "../types/providers.js";
+import type { ToolCall, ToolResult } from "../types/streamTypes.js";
+/**
+ * Stream analytics result from Vercel AI SDK streamText
+ */
+export interface StreamTextResult {
+    textStream: AsyncIterable<string>;
+    text: Promise<string>;
+    usage: Promise<{
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    } | undefined>;
+    response: Promise<{
+        id?: string;
+        model?: string;
+        timestamp?: number | Date;
+    } | undefined>;
+    finishReason: Promise<"stop" | "length" | "content-filter" | "tool-calls" | "error" | "other" | "unknown">;
+    toolResults?: Promise<ToolResult[]>;
+    toolCalls?: Promise<ToolCall[]>;
+}
+/**
+ * Interface for collecting analytics from streaming results
+ */
+export interface StreamAnalyticsCollector {
+    collectUsage(result: StreamTextResult): Promise<TokenUsage>;
+    collectMetadata(result: StreamTextResult): Promise<ResponseMetadata>;
+    createAnalytics(provider: string, model: string, result: StreamTextResult, responseTime: number, metadata?: Record<string, unknown>): Promise<AnalyticsData>;
+}
+/**
+ * Response metadata from stream result
+ */
+export interface ResponseMetadata {
+    id?: string;
+    model?: string;
+    timestamp?: number;
+    finishReason?: string;
+}
+/**
+ * Base implementation for collecting analytics from Vercel AI SDK stream results
+ */
+export declare class BaseStreamAnalyticsCollector implements StreamAnalyticsCollector {
+    /**
+     * Collect token usage from stream result
+     */
+    collectUsage(result: StreamTextResult): Promise<TokenUsage>;
+    /**
+     * Collect response metadata from stream result
+     */
+    collectMetadata(result: StreamTextResult): Promise<ResponseMetadata>;
+    /**
+     * Create comprehensive analytics from stream result
+     */
+    createAnalytics(provider: string, model: string, result: StreamTextResult, responseTime: number, metadata?: Record<string, unknown>): Promise<AnalyticsData>;
+    /**
+     * Clean up resources and force garbage collection if needed
+     */
+    cleanup(): void;
+}
+/**
+ * Global instance of stream analytics collector
+ */
+export declare const streamAnalyticsCollector: BaseStreamAnalyticsCollector;
@@ -0,0 +1,125 @@
+import { createAnalytics } from "./analytics.js";
+import { logger } from "../utils/logger.js";
+/**
+ * Base implementation for collecting analytics from Vercel AI SDK stream results
+ */
+export class BaseStreamAnalyticsCollector {
+    /**
+     * Collect token usage from stream result
+     */
+    async collectUsage(result) {
+        try {
+            const usage = await result.usage;
+            if (!usage) {
+                logger.debug("No usage data available from stream result");
+                return {
+                    inputTokens: 0,
+                    outputTokens: 0,
+                    totalTokens: 0,
+                };
+            }
+            return {
+                inputTokens: usage.promptTokens || 0,
+                outputTokens: usage.completionTokens || 0,
+                totalTokens: usage.totalTokens ||
+                    (usage.promptTokens || 0) + (usage.completionTokens || 0),
+            };
+        }
+        catch (error) {
+            logger.warn("Failed to collect usage from stream result", { error });
+            return {
+                inputTokens: 0,
+                outputTokens: 0,
+                totalTokens: 0,
+            };
+        }
+    }
+    /**
+     * Collect response metadata from stream result
+     */
+    async collectMetadata(result) {
+        try {
+            const [response, finishReason] = await Promise.all([
+                result.response,
+                result.finishReason,
+            ]);
+            return {
+                id: response?.id,
+                model: response?.model,
+                timestamp: response?.timestamp instanceof Date
+                    ? response.timestamp.getTime()
+                    : response?.timestamp || Date.now(),
+                finishReason: finishReason,
+            };
+        }
+        catch (error) {
+            logger.warn("Failed to collect metadata from stream result", { error });
+            const finishReason = await result.finishReason.catch(() => "error");
+            return {
+                timestamp: Date.now(),
+                finishReason: finishReason,
+            };
+        }
+    }
+    /**
+     * Create comprehensive analytics from stream result
+     */
+    async createAnalytics(provider, model, result, responseTime, metadata) {
+        try {
+            // Collect analytics data in parallel
+            const [usage, responseMetadata] = await Promise.all([
+                this.collectUsage(result),
+                this.collectMetadata(result),
+            ]);
+            // Get final text content and finish reason
+            const [content, finishReason, toolResults, toolCalls] = await Promise.all([
+                result.text,
+                result.finishReason,
+                result.toolResults || Promise.resolve([]),
+                result.toolCalls || Promise.resolve([]),
+            ]);
+            // Create comprehensive analytics
+            return createAnalytics(provider, model, {
+                usage,
+                content,
+                response: responseMetadata,
+                finishReason: finishReason,
+                toolResults: toolResults,
+                toolCalls: toolCalls,
+            }, responseTime, {
+                ...metadata,
+                streamingMode: true,
+                responseId: responseMetadata.id,
+                finishReason: finishReason,
+            });
+        }
+        catch (error) {
+            logger.error("Failed to create analytics from stream result", {
+                provider,
+                model,
+                error: error instanceof Error ? error.message : String(error),
+            });
+            // Return minimal analytics on error
+            return createAnalytics(provider, model, { usage: { inputTokens: 0, outputTokens: 0, totalTokens: 0 } }, responseTime, {
+                ...metadata,
+                streamingMode: true,
+                analyticsError: true,
+            });
+        }
+    }
+    /**
+     * Clean up resources and force garbage collection if needed
+     */
+    cleanup() {
+        // Only force garbage collection if memory usage exceeds 500 MB
+        const heapUsed = process.memoryUsage().heapUsed;
+        const GC_THRESHOLD = 500 * 1024 * 1024; // 500 MB
+        if (typeof global !== "undefined" && global.gc && heapUsed > GC_THRESHOLD) {
+            global.gc();
+        }
+    }
+}
+/**
+ * Global instance of stream analytics collector
+ */
+export const streamAnalyticsCollector = new BaseStreamAnalyticsCollector();
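Since StreamTextResult deliberately mirrors the shape the Vercel AI SDK's streamText returns, the collector can be exercised against any object of that shape. A self-contained sketch with fabricated values (the relative import path and all numbers are illustrative, not the package's documented entry point):

    import { streamAnalyticsCollector } from "./streamAnalytics.js"; // path illustrative
    import type { StreamTextResult } from "./streamAnalytics.js";

    const fake: StreamTextResult = {
        textStream: (async function* () { yield "Hello"; })(),
        text: Promise.resolve("Hello"),
        usage: Promise.resolve({ promptTokens: 12, completionTokens: 3, totalTokens: 15 }),
        response: Promise.resolve({ id: "resp_1", model: "gpt-4o-mini", timestamp: Date.now() }),
        finishReason: Promise.resolve("stop" as const),
    };

    // responseTime (420ms here) is measured by the caller; the collector only records it.
    const analytics = await streamAnalyticsCollector.createAnalytics("openai", "gpt-4o-mini", fake, 420);
    // analytics now carries the token usage plus { streamingMode: true, responseId: "resp_1", ... }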
@@ -48,7 +48,9 @@ export class ProviderRegistry {
         ProviderFactory.registerProvider(AIProviderName.AZURE, async (modelName) => {
             const { AzureOpenAIProvider } = await import("../providers/azureOpenai.js");
             return new AzureOpenAIProvider(modelName);
-        }, "gpt-4o-mini", ["azure", "azureOpenai"]);
+        }, process.env.AZURE_MODEL ||
+            process.env.AZURE_OPENAI_DEPLOYMENT_ID ||
+            "gpt-4o-mini", ["azure", "azureOpenai"]);
         // Register Google Vertex AI provider
         ProviderFactory.registerProvider(AIProviderName.VERTEX, async (modelName) => {
             const { GoogleVertexProvider } = await import("../providers/googleVertex.js");
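The Azure registration change above means the default deployment can now be driven from the environment. Restated as a standalone function purely to show the fallback chain (this helper is an illustration, not an export of the package):

    // Resolution order: AZURE_MODEL, then AZURE_OPENAI_DEPLOYMENT_ID, then the built-in default.
    function azureDefaultModel(env: NodeJS.ProcessEnv): string {
        return env.AZURE_MODEL || env.AZURE_OPENAI_DEPLOYMENT_ID || "gpt-4o-mini";
    }
    azureDefaultModel({ AZURE_OPENAI_DEPLOYMENT_ID: "prod-gpt4o" }); // => "prod-gpt4o"
    azureDefaultModel({});                                           // => "gpt-4o-mini"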
@@ -50,52 +50,39 @@ export function createAnalytics(provider, model, result, responseTime, context)
  * Extract token usage from various AI result formats
  */
 function extractTokenUsage(result) {
-    // Handle different response formats
+    // Use properly typed usage object from BaseProvider or direct AI SDK
     if (result.usage &&
         typeof result.usage === "object" &&
         result.usage !== null) {
         const usage = result.usage;
-        // Standard format
-        if (typeof usage.promptTokens === "number" &&
-            typeof usage.completionTokens === "number") {
-            return {
-                input: usage.promptTokens || 0,
-                output: usage.completionTokens || 0,
-                total: typeof usage.totalTokens === "number"
-                    ? usage.totalTokens
-                    : usage.promptTokens + usage.completionTokens,
-            };
+        // Try BaseProvider normalized format first (inputTokens/outputTokens)
+        if (typeof usage.inputTokens === "number" ||
+            typeof usage.outputTokens === "number") {
+            const input = typeof usage.inputTokens === "number" ? usage.inputTokens : 0;
+            const output = typeof usage.outputTokens === "number" ? usage.outputTokens : 0;
+            const total = typeof usage.totalTokens === "number"
+                ? usage.totalTokens
+                : input + output;
+            return { input, output, total };
         }
-        // Alternative formats
-        if (typeof usage.input_tokens === "number" &&
-            typeof usage.output_tokens === "number") {
-            return {
-                input: usage.input_tokens || 0,
-                output: usage.output_tokens || 0,
-                total: typeof usage.total_tokens === "number"
-                    ? usage.total_tokens
-                    : usage.input_tokens + usage.output_tokens,
-            };
+        // Try OpenAI/Mistral format (promptTokens/completionTokens)
+        if (typeof usage.promptTokens === "number" ||
+            typeof usage.completionTokens === "number") {
+            const input = typeof usage.promptTokens === "number" ? usage.promptTokens : 0;
+            const output = typeof usage.completionTokens === "number" ? usage.completionTokens : 0;
+            const total = typeof usage.totalTokens === "number"
+                ? usage.totalTokens
+                : input + output;
+            return { input, output, total };
         }
-        // Generic tokens field
-        if (typeof usage.tokens === "number") {
-            return {
-                input: 0,
-                output: 0,
-                total: usage.tokens,
-            };
+        // Handle total-only case
+        if (typeof usage.totalTokens === "number") {
+            return { input: 0, output: 0, total: usage.totalTokens };
         }
     }
-    // Fallback: estimate from text length
-    const textLength = (typeof result.text === "string" ? result.text.length : 0) ||
-        (typeof result.content === "string" ? result.content.length : 0) ||
-        0;
-    const estimatedTokens = Math.ceil(textLength / 4); // ~4 chars per token
-    return {
-        input: 0,
-        output: estimatedTokens,
-        total: estimatedTokens,
-    };
+    // Fallback for edge cases
+    logger.debug("Token extraction failed: unknown usage format", { result });
+    return { input: 0, output: 0, total: 0 };
 }
 /**
  * Estimate cost based on provider, model, and token usage
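The rewrite above replaces 7.0.0's character-count estimation with strict shape matching, and drops the snake_case input_tokens/output_tokens branch entirely. extractTokenUsage is internal to analytics.js; the calls below are written as if it were exported, purely to illustrate the shapes it now accepts:

    extractTokenUsage({ usage: { inputTokens: 10, outputTokens: 5 } });
    // => { input: 10, output: 5, total: 15 }  (BaseProvider normalized shape)

    extractTokenUsage({ usage: { promptTokens: 10, completionTokens: 5, totalTokens: 15 } });
    // => { input: 10, output: 5, total: 15 }  (OpenAI/Mistral shape)

    extractTokenUsage({ usage: { totalTokens: 20 } });
    // => { input: 0, output: 0, total: 20 }   (total-only)

    extractTokenUsage({ text: "hello" });
    // => { input: 0, output: 0, total: 0 }    (unknown shape: logs a debug line instead of estimating from text length)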
@@ -447,5 +447,13 @@ export declare abstract class BaseProvider implements AIProvider {
      * Get timeout value in milliseconds
      */
     getTimeout(options: TextGenerationOptions | StreamOptions): number;
+    /**
+     * Utility method to chunk large prompts into smaller pieces
+     * @param prompt The prompt to chunk
+     * @param maxChunkSize Maximum size per chunk (default: 900,000 characters)
+     * @param overlap Overlap between chunks to maintain context (default: 100 characters)
+     * @returns Array of prompt chunks
+     */
+    static chunkPrompt(prompt: string, maxChunkSize?: number, overlap?: number): string[];
 }
 export {};
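Only the chunkPrompt declaration ships in this .d.ts; the implementation lands in baseProvider.js (not shown in this diff). A sketch of what the documented contract implies, as an illustration of the technique rather than the package's actual code:

    // Split `prompt` into pieces of at most maxChunkSize characters, each
    // overlapping the previous by `overlap` characters so context carries
    // across chunk boundaries. Assumes maxChunkSize > overlap.
    function chunkPrompt(prompt: string, maxChunkSize = 900_000, overlap = 100): string[] {
        if (prompt.length <= maxChunkSize) {
            return [prompt];
        }
        const chunks: string[] = [];
        let start = 0;
        while (start < prompt.length) {
            chunks.push(prompt.slice(start, start + maxChunkSize));
            start += maxChunkSize - overlap; // step back by `overlap` to share context
        }
        return chunks;
    }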