@juspay/neurolink 5.1.0 → 5.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (166)
  1. package/CHANGELOG.md +15 -9
  2. package/README.md +123 -126
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/cli/commands/config.d.ts +3 -3
  5. package/dist/cli/commands/mcp.js +8 -7
  6. package/dist/cli/factories/command-factory.d.ts +4 -0
  7. package/dist/cli/factories/command-factory.js +57 -3
  8. package/dist/cli/index.js +87 -140
  9. package/dist/core/base-provider.d.ts +423 -0
  10. package/dist/core/base-provider.js +365 -0
  11. package/dist/core/constants.d.ts +1 -1
  12. package/dist/core/constants.js +1 -1
  13. package/dist/core/dynamic-models.d.ts +6 -6
  14. package/dist/core/evaluation.d.ts +19 -80
  15. package/dist/core/evaluation.js +185 -484
  16. package/dist/core/factory.d.ts +3 -3
  17. package/dist/core/factory.js +31 -91
  18. package/dist/core/service-registry.d.ts +47 -0
  19. package/dist/core/service-registry.js +112 -0
  20. package/dist/core/types.d.ts +8 -1
  21. package/dist/factories/compatibility-factory.js +1 -1
  22. package/dist/factories/provider-factory.d.ts +72 -0
  23. package/dist/factories/provider-factory.js +144 -0
  24. package/dist/factories/provider-registry.d.ts +38 -0
  25. package/dist/factories/provider-registry.js +107 -0
  26. package/dist/index.d.ts +4 -3
  27. package/dist/index.js +2 -4
  28. package/dist/lib/agent/direct-tools.d.ts +6 -6
  29. package/dist/lib/core/base-provider.d.ts +423 -0
  30. package/dist/lib/core/base-provider.js +365 -0
  31. package/dist/lib/core/constants.d.ts +1 -1
  32. package/dist/lib/core/constants.js +1 -1
  33. package/dist/lib/core/dynamic-models.d.ts +6 -6
  34. package/dist/lib/core/evaluation.d.ts +19 -80
  35. package/dist/lib/core/evaluation.js +185 -484
  36. package/dist/lib/core/factory.d.ts +3 -3
  37. package/dist/lib/core/factory.js +30 -91
  38. package/dist/lib/core/service-registry.d.ts +47 -0
  39. package/dist/lib/core/service-registry.js +112 -0
  40. package/dist/lib/core/types.d.ts +8 -1
  41. package/dist/lib/factories/compatibility-factory.js +1 -1
  42. package/dist/lib/factories/provider-factory.d.ts +72 -0
  43. package/dist/lib/factories/provider-factory.js +144 -0
  44. package/dist/lib/factories/provider-registry.d.ts +38 -0
  45. package/dist/lib/factories/provider-registry.js +107 -0
  46. package/dist/lib/index.d.ts +4 -3
  47. package/dist/lib/index.js +2 -4
  48. package/dist/lib/mcp/config.js +28 -3
  49. package/dist/lib/mcp/function-calling.js +1 -1
  50. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  51. package/dist/lib/mcp/initialize-tools.js +45 -1
  52. package/dist/lib/mcp/initialize.js +16 -6
  53. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  54. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  55. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
  56. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  57. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  58. package/dist/lib/mcp/unified-registry.js +42 -9
  59. package/dist/lib/neurolink.d.ts +156 -117
  60. package/dist/lib/neurolink.js +619 -404
  61. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  62. package/dist/lib/providers/amazon-bedrock.js +143 -0
  63. package/dist/lib/providers/analytics-helper.js +7 -4
  64. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  65. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  66. package/dist/lib/providers/anthropic.d.ts +19 -43
  67. package/dist/lib/providers/anthropic.js +82 -306
  68. package/dist/lib/providers/azure-openai.d.ts +20 -0
  69. package/dist/lib/providers/azure-openai.js +89 -0
  70. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  71. package/dist/lib/providers/google-ai-studio.js +107 -0
  72. package/dist/lib/providers/google-vertex.d.ts +47 -0
  73. package/dist/lib/providers/google-vertex.js +205 -0
  74. package/dist/lib/providers/huggingFace.d.ts +32 -25
  75. package/dist/lib/providers/huggingFace.js +97 -431
  76. package/dist/lib/providers/index.d.ts +9 -9
  77. package/dist/lib/providers/index.js +9 -9
  78. package/dist/lib/providers/mcp-provider.js +4 -0
  79. package/dist/lib/providers/mistral.d.ts +42 -0
  80. package/dist/lib/providers/mistral.js +160 -0
  81. package/dist/lib/providers/ollama.d.ts +52 -36
  82. package/dist/lib/providers/ollama.js +297 -520
  83. package/dist/lib/providers/openAI.d.ts +19 -18
  84. package/dist/lib/providers/openAI.js +76 -275
  85. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  86. package/dist/lib/sdk/tool-extension.js +283 -0
  87. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  88. package/dist/lib/sdk/tool-registration.js +167 -0
  89. package/dist/lib/types/generate-types.d.ts +1 -0
  90. package/dist/lib/types/mcp-types.d.ts +116 -0
  91. package/dist/lib/types/mcp-types.js +5 -0
  92. package/dist/lib/types/stream-types.d.ts +30 -18
  93. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  94. package/dist/lib/types/universal-provider-options.js +53 -0
  95. package/dist/mcp/config.js +28 -3
  96. package/dist/mcp/function-calling.js +1 -1
  97. package/dist/mcp/initialize-tools.d.ts +1 -1
  98. package/dist/mcp/initialize-tools.js +45 -1
  99. package/dist/mcp/initialize.js +16 -6
  100. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  101. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  102. package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
  103. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  104. package/dist/mcp/unified-registry.d.ts +4 -0
  105. package/dist/mcp/unified-registry.js +42 -9
  106. package/dist/neurolink.d.ts +156 -117
  107. package/dist/neurolink.js +619 -404
  108. package/dist/providers/amazon-bedrock.d.ts +32 -0
  109. package/dist/providers/amazon-bedrock.js +143 -0
  110. package/dist/providers/analytics-helper.js +7 -4
  111. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  112. package/dist/providers/anthropic-baseprovider.js +114 -0
  113. package/dist/providers/anthropic.d.ts +19 -43
  114. package/dist/providers/anthropic.js +81 -305
  115. package/dist/providers/azure-openai.d.ts +20 -0
  116. package/dist/providers/azure-openai.js +89 -0
  117. package/dist/providers/google-ai-studio.d.ts +23 -0
  118. package/dist/providers/google-ai-studio.js +108 -0
  119. package/dist/providers/google-vertex.d.ts +47 -0
  120. package/dist/providers/google-vertex.js +205 -0
  121. package/dist/providers/huggingFace.d.ts +32 -25
  122. package/dist/providers/huggingFace.js +96 -430
  123. package/dist/providers/index.d.ts +9 -9
  124. package/dist/providers/index.js +9 -9
  125. package/dist/providers/mcp-provider.js +4 -0
  126. package/dist/providers/mistral.d.ts +42 -0
  127. package/dist/providers/mistral.js +160 -0
  128. package/dist/providers/ollama.d.ts +52 -36
  129. package/dist/providers/ollama.js +297 -519
  130. package/dist/providers/openAI.d.ts +19 -18
  131. package/dist/providers/openAI.js +76 -276
  132. package/dist/sdk/tool-extension.d.ts +181 -0
  133. package/dist/sdk/tool-extension.js +283 -0
  134. package/dist/sdk/tool-registration.d.ts +95 -0
  135. package/dist/sdk/tool-registration.js +168 -0
  136. package/dist/types/generate-types.d.ts +1 -0
  137. package/dist/types/mcp-types.d.ts +116 -0
  138. package/dist/types/mcp-types.js +5 -0
  139. package/dist/types/stream-types.d.ts +30 -18
  140. package/dist/types/universal-provider-options.d.ts +87 -0
  141. package/dist/types/universal-provider-options.js +53 -0
  142. package/package.json +15 -10
  143. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
  144. package/dist/lib/providers/agent-enhanced-provider.js +0 -605
  145. package/dist/lib/providers/amazonBedrock.d.ts +0 -28
  146. package/dist/lib/providers/amazonBedrock.js +0 -364
  147. package/dist/lib/providers/azureOpenAI.d.ts +0 -42
  148. package/dist/lib/providers/azureOpenAI.js +0 -347
  149. package/dist/lib/providers/googleAIStudio.d.ts +0 -42
  150. package/dist/lib/providers/googleAIStudio.js +0 -364
  151. package/dist/lib/providers/googleVertexAI.d.ts +0 -34
  152. package/dist/lib/providers/googleVertexAI.js +0 -547
  153. package/dist/lib/providers/mistralAI.d.ts +0 -37
  154. package/dist/lib/providers/mistralAI.js +0 -325
  155. package/dist/providers/agent-enhanced-provider.d.ts +0 -93
  156. package/dist/providers/agent-enhanced-provider.js +0 -606
  157. package/dist/providers/amazonBedrock.d.ts +0 -28
  158. package/dist/providers/amazonBedrock.js +0 -364
  159. package/dist/providers/azureOpenAI.d.ts +0 -42
  160. package/dist/providers/azureOpenAI.js +0 -348
  161. package/dist/providers/googleAIStudio.d.ts +0 -42
  162. package/dist/providers/googleAIStudio.js +0 -364
  163. package/dist/providers/googleVertexAI.d.ts +0 -34
  164. package/dist/providers/googleVertexAI.js +0 -547
  165. package/dist/providers/mistralAI.d.ts +0 -37
  166. package/dist/providers/mistralAI.js +0 -325
import { createMistral } from "@ai-sdk/mistral";
import { streamText, Output } from "ai";
import { BaseProvider } from "../core/base-provider.js";
import { logger } from "../utils/logger.js";
import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
// Configuration helpers
/**
 * Reads the Mistral API key from the environment.
 * @throws {Error} with step-by-step setup guidance when MISTRAL_API_KEY is unset.
 */
const getMistralApiKey = () => {
    const apiKey = process.env.MISTRAL_API_KEY;
    if (!apiKey) {
        throw new Error(`❌ Mistral AI Provider Configuration Error\n\nMissing required environment variable: MISTRAL_API_KEY\n\n🔧 Step 1: Get Mistral AI API Key\n1. Visit: https://console.mistral.ai/\n2. Sign in or create an account\n3. Go to API Keys section\n4. Create a new API key\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nMISTRAL_API_KEY=your_api_key_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
    }
    return apiKey;
};
/** Default model, overridable via the MISTRAL_MODEL environment variable. */
const getDefaultMistralModel = () => {
    return process.env.MISTRAL_MODEL || "mistral-small";
};
/** True when a MISTRAL_API_KEY is present (does not validate the key). */
const hasMistralCredentials = () => {
    return !!process.env.MISTRAL_API_KEY;
};
/**
 * Mistral AI Provider v2 - BaseProvider Implementation
 *
 * PHASE 3.6: Simple BaseProvider wrap around existing @ai-sdk/mistral implementation
 *
 * Features:
 * - Extends BaseProvider for shared functionality
 * - Uses pre-configured Mistral instance for efficiency
 * - Enhanced error handling with setup guidance
 * - Supports all Mistral models (mistral-small, mistral-medium, mistral-large)
 */
export class MistralProvider extends BaseProvider {
    mistral;
    model;
    /**
     * @param {string} [modelName] - Mistral model id; falls back to MISTRAL_MODEL or "mistral-small".
     * @param {object} [sdk] - Optional SDK handle forwarded to BaseProvider.
     * @throws {Error} when no MISTRAL_API_KEY is configured.
     */
    constructor(modelName, sdk) {
        super(modelName, "mistral", sdk);
        // Validate Mistral API credentials before creating the client so the
        // failure carries setup guidance instead of a raw HTTP 401 later.
        if (!hasMistralCredentials()) {
            throw new Error(`❌ Mistral AI Provider Configuration Error\n\nMissing Mistral AI API key.\n\n🔧 Required Environment Variable:\nMISTRAL_API_KEY=your_api_key_here\n\n🔧 Get API Key:\n1. Visit: https://console.mistral.ai/\n2. Sign in or create account\n3. Generate API key\n4. Add to .env file\n\n🔧 Restart Application\nRestart your application to load the new environment variables.`);
        }
        // Initialize Mistral provider
        this.mistral = createMistral({
            apiKey: getMistralApiKey(),
        });
        // Pre-initialize model for efficiency (avoids re-creating it per call)
        this.model = this.mistral(this.modelName || getDefaultMistralModel());
        logger.debug("Mistral AI BaseProvider v2 initialized", {
            modelName: this.modelName,
            provider: this.providerName,
        });
    }
    getProviderName() {
        return "mistral";
    }
    getDefaultModel() {
        return getDefaultMistralModel();
    }
    /**
     * Returns the Vercel AI SDK model instance for Mistral
     */
    getAISDKModel() {
        return this.model;
    }
    // executeGenerate removed - BaseProvider handles all generation with tools
    /**
     * Streams text for the given options via the Vercel AI SDK.
     * @param options - StreamOptions (input.text, systemPrompt, maxTokens, temperature).
     * @param analysisSchema - Accepted for interface parity; not used by this provider.
     * @returns {{stream: AsyncGenerator, provider: string, model: string}}
     * @throws {Error|TimeoutError} mapped by handleProviderError.
     */
    async executeStream(options, analysisSchema) {
        try {
            this.validateStreamOptions(options);
            const result = await streamText({
                model: this.model,
                prompt: options.input.text,
                system: options.systemPrompt,
                maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
                temperature: options.temperature,
            });
            return {
                // Adapt the SDK's textStream into this package's chunk shape.
                stream: (async function* () {
                    for await (const chunk of result.textStream) {
                        yield { content: chunk };
                    }
                })(),
                provider: this.providerName,
                model: this.modelName,
            };
        }
        catch (error) {
            throw this.handleProviderError(error);
        }
    }
    /**
     * Maps raw SDK/HTTP errors to actionable, user-facing errors.
     * Matching is by message substring (status codes), most specific first.
     */
    handleProviderError(error) {
        if (error.name === "TimeoutError") {
            return new TimeoutError(`Mistral AI request timed out. Consider increasing timeout or using a lighter model.`, this.defaultTimeout);
        }
        if (error.message?.includes("401") ||
            error.message?.includes("Unauthorized")) {
            return new Error(`❌ Mistral AI Authentication Error\n\nYour API key is invalid or expired.\n\n🔧 Steps to Fix:\n1. Check your MISTRAL_API_KEY in .env file\n2. Verify the API key is correct and active\n3. Generate a new API key if needed at https://console.mistral.ai/\n4. Restart your application after updating`);
        }
        if (error.message?.includes("403") ||
            error.message?.includes("Forbidden")) {
            return new Error(`❌ Mistral AI Access Denied\n\nYour account doesn't have permission to access this model.\n\n🔧 Possible Solutions:\n1. Check if your account has access to the model: ${this.modelName}\n2. Try a different model (e.g., 'mistral-small')\n3. Verify your subscription status\n4. Contact Mistral AI support if needed`);
        }
        if (error.message?.includes("429") ||
            error.message?.includes("rate limit")) {
            return new Error(`❌ Mistral AI Rate Limit Exceeded\n\n${error.message}\n\n🔧 Solutions:\n1. Wait a moment before retrying\n2. Reduce request frequency\n3. Check your usage quotas\n4. Consider upgrading your plan`);
        }
        if (error.message?.includes("400") ||
            error.message?.includes("Bad Request")) {
            return new Error(`❌ Mistral AI Invalid Request\n\n${error.message}\n\n🔧 Check:\n1. Input text is properly formatted\n2. Model name is correct: ${this.modelName}\n3. Parameters are within limits\n4. Request format matches API requirements`);
        }
        if (error.message?.includes("404") ||
            error.message?.includes("Not Found")) {
            return new Error(`❌ Mistral AI Model Not Found\n\nModel '${this.modelName}' is not available.\n\n🔧 Available Models:\n- mistral-small (fastest, cost-effective)\n- mistral-medium (balanced performance)\n- mistral-large (highest quality)\n\n🔧 Fix: Update MISTRAL_MODEL environment variable`);
        }
        return new Error(`❌ Mistral AI Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check API key and network connectivity\n2. Verify model availability\n3. Review request parameters\n4. Check Mistral AI status page`);
    }
    /**
     * Validates streaming options.
     * NOTE: uses explicit `!= null` guards so that 0 is range-checked —
     * previously `maxTokens: 0` was truthiness-skipped and silently fell
     * back to the default instead of being rejected as out of range.
     * @throws {Error} on missing prompt or out-of-range parameters.
     */
    validateStreamOptions(options) {
        if (!options.input?.text?.trim()) {
            throw new Error("Prompt is required for streaming");
        }
        if (options.maxTokens != null &&
            (options.maxTokens < 1 || options.maxTokens > 32768)) {
            throw new Error("maxTokens must be between 1 and 32768 for Mistral AI");
        }
        if (options.temperature != null &&
            (options.temperature < 0 || options.temperature > 1)) {
            throw new Error("temperature must be between 0 and 1");
        }
    }
    /**
     * Check available Mistral models
     * @returns Array of available model names
     */
    getAvailableModels() {
        return [
            "mistral-small",
            "mistral-medium",
            "mistral-large",
            "mistral-7b-instruct",
            "mistral-8x7b-instruct",
            "mistral-8x22b-instruct",
        ];
    }
    /**
     * Get recommended model based on use case
     * @param useCase - The intended use case ("speed" | "balanced" | "quality")
     * @returns Recommended model name (defaults to "mistral-small")
     */
    getRecommendedModel(useCase) {
        switch (useCase) {
            case "speed":
                return "mistral-small";
            case "balanced":
                return "mistral-medium";
            case "quality":
                return "mistral-large";
            default:
                return "mistral-small";
        }
    }
}
export default MistralProvider;
@@ -1,54 +1,70 @@
1
+ import type { AIProviderName } from "../core/types.js";
2
+ import type { LanguageModelV1 } from "ai";
3
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
4
+ import type { ZodType, ZodTypeDef } from "zod";
5
+ import type { Schema } from "ai";
6
+ import { BaseProvider } from "../core/base-provider.js";
1
7
  /**
2
- * Ollama Provider for NeuroLink
8
+ * Ollama Provider v2 - BaseProvider Implementation
3
9
  *
4
- * Local AI model deployment and management using Ollama.
5
- * Provides offline AI capabilities with local model hosting.
10
+ * PHASE 3.7: BaseProvider wrap around existing custom Ollama implementation
6
11
  *
7
12
  * Features:
8
- * - Local model deployment (privacy-first)
9
- * - Model management (download, list, remove)
10
- * - Health checking and service validation
11
- * - Streaming and non-streaming text generation
13
+ * - Extends BaseProvider for shared functionality
14
+ * - Preserves custom OllamaLanguageModel implementation
15
+ * - Local model management and health checking
16
+ * - Enhanced error handling with Ollama-specific guidance
12
17
  */
13
- import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
14
- import type { GenerateResult } from "../types/generate-types.js";
15
- import type { StreamOptions, StreamResult } from "../types/stream-types.js";
16
- import type { ZodType, ZodTypeDef } from "zod";
17
- import type { Schema } from "ai";
18
- export declare class Ollama implements AIProvider {
18
+ export declare class OllamaProvider extends BaseProvider {
19
+ private ollamaModel;
19
20
  private baseUrl;
20
- private modelName;
21
- private defaultTimeout;
21
+ private timeout;
22
22
  constructor(modelName?: string);
23
+ protected getProviderName(): AIProviderName;
24
+ protected getDefaultModel(): string;
23
25
  /**
24
- * Gets the appropriate model instance
25
- * @private
26
- */
27
- private getModel;
28
- /**
29
- * Health check - verify Ollama service is running and accessible
26
+ * Returns the Vercel AI SDK model instance for Ollama
30
27
  */
31
- checkHealth(): Promise<boolean>;
28
+ protected getAISDKModel(): LanguageModelV1;
32
29
  /**
33
- * List available models on the Ollama instance
30
+ * Ollama tool/function calling support is currently disabled due to integration issues.
31
+ *
32
+ * **Current Issues:**
33
+ * 1. The OllamaLanguageModel from @ai-sdk/provider-utils doesn't properly integrate
34
+ * with BaseProvider's tool calling mechanism
35
+ * 2. Ollama models require specific prompt formatting for function calls that differs
36
+ * from the standardized AI SDK format
37
+ * 3. Tool response parsing and execution flow needs custom implementation
38
+ *
39
+ * **What's needed to enable tool support:**
40
+ * - Create a custom OllamaLanguageModel wrapper that handles tool schema formatting
41
+ * - Implement Ollama-specific tool calling prompt templates
42
+ * - Add proper response parsing for Ollama's function call format
43
+ * - Test with models that support function calling (llama3.1, mistral, etc.)
44
+ *
45
+ * **Tracking:**
46
+ * - See BaseProvider tool integration patterns in other providers
47
+ * - Monitor Ollama function calling documentation: https://ollama.com/blog/tool-support
48
+ * - Track AI SDK updates for better Ollama integration
49
+ *
50
+ * @returns false to disable tools by default
34
51
  */
35
- listModels(): Promise<string[]>;
52
+ supportsTools(): boolean;
53
+ protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
54
+ private createOllamaStream;
55
+ protected handleProviderError(error: any): Error;
56
+ private validateStreamOptions;
36
57
  /**
37
- * Check if a specific model is available
58
+ * Check if Ollama service is healthy and accessible
38
59
  */
39
- isModelAvailable(modelName: string): Promise<boolean>;
60
+ private checkOllamaHealth;
40
61
  /**
41
- * Pull/download a model to the local Ollama instance
62
+ * Get available models from Ollama
42
63
  */
43
- pullModel(modelName: string): Promise<void>;
64
+ getAvailableModels(): Promise<string[]>;
44
65
  /**
45
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
46
- * Future-ready for multi-modal capabilities with current text focus
47
- */
48
- stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
49
- /**
50
- * Generate text using Ollama local models
66
+ * Check if a specific model is available
51
67
  */
52
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
53
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
68
+ isModelAvailable(modelName: string): Promise<boolean>;
54
69
  }
70
+ export default OllamaProvider;