@juspay/neurolink 1.5.2 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. package/CHANGELOG.md +87 -0
  2. package/README.md +17 -7
  3. package/dist/cli/commands/config.d.ts +70 -3
  4. package/dist/cli/commands/config.js +75 -3
  5. package/dist/cli/commands/ollama.d.ts +8 -0
  6. package/dist/cli/commands/ollama.js +323 -0
  7. package/dist/cli/index.js +13 -15
  8. package/dist/core/factory.js +17 -2
  9. package/dist/core/types.d.ts +4 -1
  10. package/dist/core/types.js +3 -0
  11. package/dist/lib/core/factory.js +17 -2
  12. package/dist/lib/core/types.d.ts +4 -1
  13. package/dist/lib/core/types.js +3 -0
  14. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
  15. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +13 -9
  16. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +250 -152
  17. package/dist/lib/neurolink.d.ts +2 -2
  18. package/dist/lib/neurolink.js +18 -8
  19. package/dist/lib/providers/huggingFace.d.ts +31 -0
  20. package/dist/lib/providers/huggingFace.js +355 -0
  21. package/dist/lib/providers/index.d.ts +6 -0
  22. package/dist/lib/providers/index.js +7 -1
  23. package/dist/lib/providers/mistralAI.d.ts +32 -0
  24. package/dist/lib/providers/mistralAI.js +217 -0
  25. package/dist/lib/providers/ollama.d.ts +51 -0
  26. package/dist/lib/providers/ollama.js +493 -0
  27. package/dist/lib/utils/providerUtils.js +17 -2
  28. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
  29. package/dist/mcp/servers/ai-providers/ai-core-server.js +13 -9
  30. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +248 -152
  31. package/dist/neurolink.d.ts +2 -2
  32. package/dist/neurolink.js +18 -8
  33. package/dist/providers/huggingFace.d.ts +31 -0
  34. package/dist/providers/huggingFace.js +355 -0
  35. package/dist/providers/index.d.ts +6 -0
  36. package/dist/providers/index.js +7 -1
  37. package/dist/providers/mistralAI.d.ts +32 -0
  38. package/dist/providers/mistralAI.js +217 -0
  39. package/dist/providers/ollama.d.ts +51 -0
  40. package/dist/providers/ollama.js +493 -0
  41. package/dist/utils/providerUtils.js +17 -2
  42. package/package.json +161 -151
package/CHANGELOG.md CHANGED
@@ -1,5 +1,92 @@
1
1
  # @juspay/neurolink
2
2
 
3
+ ## 1.6.0
4
+
5
+ ### Major Changes
6
+
7
+ - **🎉 Universal AI Provider Support**: Expanded from 6 to 9 AI providers with support for open source models, local AI, and European compliance
8
+ - **🆕 Hugging Face Provider**: Access to 100,000+ open source models with community-driven AI ecosystem
9
+ - **🆕 Ollama Provider**: 100% local AI execution with complete data privacy and no internet required
10
+ - **🆕 Mistral AI Provider**: European GDPR-compliant AI with competitive pricing and multilingual models
11
+
12
+ ### Features
13
+
14
+ - **🛠️ Enhanced CLI with Ollama Commands**: New Ollama-specific management commands
15
+ - `neurolink ollama list-models` - List installed local models
16
+ - `neurolink ollama pull <model>` - Download models locally
17
+ - `neurolink ollama remove <model>` - Remove installed models
18
+ - `neurolink ollama status` - Check Ollama service health
19
+ - `neurolink ollama start/stop` - Manage Ollama service
20
+ - `neurolink ollama setup` - Interactive setup wizard
21
+
22
+ - **📚 Comprehensive Documentation**: Complete documentation for all new providers
23
+ - **OLLAMA-SETUP.md**: Platform-specific installation guides
24
+ - **PROVIDER-COMPARISON.md**: Detailed provider comparison matrix
25
+ - Updated all documentation to reflect 9 providers
26
+ - Enhanced provider configuration guides
27
+
28
+ ### Technical Implementation
29
+
30
+ - **Provider Files**: `huggingFace.ts`, `ollama.ts`, `mistralAI.ts`
31
+ - **Dependencies**: Added `@huggingface/inference`, `@ai-sdk/mistral`, `inquirer`
32
+ - **MCP Integration**: All 10 MCP tools support new providers
33
+ - **Demo Updates**: Enhanced demo to showcase all 9 providers
34
+ - **CLI Enhancement**: Ollama command structure with 7 subcommands
35
+ - **Provider Priority**: Updated auto-selection to include new providers
36
+
37
+ ### Provider Comparison
38
+
39
+ | Provider | Best For | Setup Time | Privacy | Cost |
40
+ |----------|----------|------------|---------|------|
41
+ | OpenAI | General use | 2 min | Cloud | $$$ |
42
+ | Ollama | Privacy | 5 min | Local | Free |
43
+ | Hugging Face | Open source | 2 min | Cloud | Free/$$ |
44
+ | Mistral | EU compliance | 2 min | Cloud | $$ |
45
+
46
+ ### Bug Fixes
47
+
48
+ - **🔧 Local Provider Fallback**: Implemented no-fallback policy for Ollama
49
+ - When explicitly requesting `--provider ollama`, no cloud fallback occurs
50
+ - Preserves user privacy intent when using local providers
51
+ - Auto-selection still maintains intelligent fallback
52
+
53
+ ### Breaking Changes
54
+
55
+ - None - 100% backward compatibility maintained
56
+
57
+ ## 1.5.3
58
+
59
+ ### Patch Changes
60
+
61
+ - **🔧 CLI Debug Log Persistence Fix**: Fixed unwanted debug logs appearing in production deployments
62
+ - **Issue**: CLI showed debug logs even when the `--debug` flag was not provided, cluttering production output
63
+ - **Root Cause**: CLI middleware had a logical gap where `NEUROLINK_DEBUG` wasn't explicitly set to `'false'` when no debug flag was provided, allowing inherited environment variables to persist
64
+ - **Solution**: Updated middleware to always set `NEUROLINK_DEBUG = 'false'` when debug mode is not enabled
65
+ - **Impact**: **Deterministic logging behavior** - debug logs only appear when explicitly requested with `--debug` flag
66
+
67
+ ### Technical Changes
68
+
69
+ - **Clean Production Output**: No debug logs in deployed CLI unless `--debug` flag explicitly provided
70
+ - **Deterministic Behavior**: Logging controlled by CLI flags, not inherited environment variables
71
+ - **Backward Compatible**: Debug mode still works perfectly when `--debug` flag is used
72
+ - **Environment Independence**: CLI output no longer affected by external `NEUROLINK_DEBUG` settings
73
+
74
+ ### CLI Behavior Fix
75
+
76
+ ```bash
77
+ # Before Fix (Problematic)
78
+ neurolink generate-text "test"
79
+ # Could show debug logs if NEUROLINK_DEBUG was set in environment
80
+
81
+ # After Fix (Clean)
82
+ neurolink generate-text "test"
83
+ # Output: ⠋ 🤖 Generating text... ✔ ✅ Text generated successfully! [content]
84
+
85
+ # Debug still works when requested
86
+ neurolink generate-text "test" --debug
87
+ # Output: [debug logs] + spinner + success + content
88
+ ```
89
+
3
90
  ## 1.5.2
4
91
 
5
92
  ### Patch Changes
package/README.md CHANGED
@@ -1,12 +1,15 @@
1
1
  # 🧠 NeuroLink
2
2
 
3
- [![npm version](https://badge.fury.io/js/%40juspay%2Fneurolink.svg)](https://badge.fury.io/js/%40juspay%2Fneurolink)
4
- [![TypeScript](https://img.shields.io/badge/%3C%2F%3E-TypeScript-%230074c1.svg)](http://www.typescriptlang.org/)
5
- [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
3
+ [![NPM Version](https://img.shields.io/npm/v/@juspay/neurolink)](https://www.npmjs.com/package/@juspay/neurolink)
4
+ [![Downloads](https://img.shields.io/npm/dm/@juspay/neurolink)](https://www.npmjs.com/package/@juspay/neurolink)
5
+ [![GitHub Stars](https://img.shields.io/github/stars/juspay/neurolink)](https://github.com/juspay/neurolink/stargazers)
6
+ [![License](https://img.shields.io/npm/l/@juspay/neurolink)](https://github.com/juspay/neurolink/blob/main/LICENSE)
7
+ [![TypeScript](https://img.shields.io/badge/TypeScript-Ready-blue)](https://www.typescriptlang.org/)
8
+ [![CI](https://github.com/juspay/neurolink/workflows/CI/badge.svg)](https://github.com/juspay/neurolink/actions)
6
9
 
7
- > Universal AI toolkit with 5+ provider support, automatic fallback, and both CLI + SDK interfaces. Production-ready with TypeScript support.
10
+ > Universal AI toolkit with 9 major AI providers, automatic fallback, and both CLI + SDK interfaces. Production-ready with TypeScript support.
8
11
 
9
- **NeuroLink** unifies OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, and Azure OpenAI with intelligent fallback and streaming support. Available as both a **programmatic SDK** and **professional CLI tool**. Extracted from production use at Juspay.
12
+ **NeuroLink** unifies OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, Azure OpenAI, Hugging Face, Ollama, and Mistral AI with intelligent fallback and streaming support. Available as both a **programmatic SDK** and **professional CLI tool**. Extracted from production use at Juspay.
10
13
 
11
14
  ## 🚀 Quick Start
12
15
 
@@ -57,12 +60,15 @@ npx @juspay/neurolink status
57
60
 
58
61
  ## ✨ Key Features
59
62
 
60
- - 🔄 **5+ AI Providers** - OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, Azure
63
+ - 🔄 **9 AI Providers** - OpenAI, Bedrock, Vertex AI, Google AI Studio, Anthropic, Azure, Hugging Face, Ollama, Mistral AI
61
64
  - ⚡ **Automatic Fallback** - Never fail when providers are down
62
65
  - 🖥️ **CLI + SDK** - Use from command line or integrate programmatically
63
66
  - 🛡️ **Production Ready** - TypeScript, error handling, extracted from production
64
67
  - 🔧 **MCP Integration** - Model Context Protocol support for extensibility
65
68
  - 🤖 **AI Analysis Tools** - Built-in optimization and workflow assistance
69
+ - 🏠 **Local AI Support** - Run completely offline with Ollama
70
+ - 🌍 **Open Source Models** - Access 100,000+ models via Hugging Face
71
+ - 🇪🇺 **GDPR Compliance** - European data processing with Mistral AI
66
72
 
67
73
  ## 💻 Essential Examples
68
74
 
@@ -113,9 +119,10 @@ cd neurolink-demo && node server.js
113
119
  # Visit http://localhost:9876 for live demo
114
120
  ```
115
121
 
116
- - **Real AI Integration**: All 5 providers functional with live generation
122
+ - **Real AI Integration**: All 9 providers functional with live generation
117
123
  - **Complete Use Cases**: Business, creative, and developer scenarios
118
124
  - **Performance Metrics**: Live provider analytics and response times
125
+ - **Privacy Options**: Test local AI with Ollama
119
126
 
120
127
  ### 🖥️ CLI Demonstrations
121
128
  - **[CLI Help & Commands](./docs/visual-content/cli-videos/cli-01-cli-help.mp4)** - Complete command reference
@@ -157,6 +164,9 @@ cd neurolink-demo && node server.js
157
164
  | **Google Vertex AI** | Gemini 2.5 Flash | Service Account | ❌ |
158
165
  | **Anthropic** | Claude 3.5 Sonnet | API Key | ❌ |
159
166
  | **Azure OpenAI** | GPT-4, GPT-3.5 | API Key + Endpoint | ❌ |
167
+ | **Hugging Face** 🆕 | 100,000+ models | API Key | ✅ |
168
+ | **Ollama** 🆕 | Llama 2, Code Llama, Mistral | None (Local) | ✅ |
169
+ | **Mistral AI** 🆕 | Tiny, Small, Medium, Large | API Key | ❌ |
160
170
 
161
171
  **✨ Auto-Selection**: NeuroLink automatically chooses the best available provider based on speed, reliability, and configuration.
162
172
 
@@ -7,7 +7,7 @@
7
7
  */
8
8
  import { z } from 'zod';
9
9
  declare const ConfigSchema: z.ZodObject<{
10
- defaultProvider: z.ZodDefault<z.ZodEnum<["auto", "openai", "bedrock", "vertex", "anthropic", "azure", "google-ai", "huggingface"]>>;
10
+ defaultProvider: z.ZodDefault<z.ZodEnum<["auto", "openai", "bedrock", "vertex", "anthropic", "azure", "google-ai", "huggingface", "ollama", "mistral"]>>;
11
11
  providers: z.ZodDefault<z.ZodObject<{
12
12
  openai: z.ZodOptional<z.ZodObject<{
13
13
  apiKey: z.ZodOptional<z.ZodString>;
@@ -112,6 +112,29 @@ declare const ConfigSchema: z.ZodObject<{
112
112
  model?: string | undefined;
113
113
  apiKey?: string | undefined;
114
114
  }>>;
115
+ ollama: z.ZodOptional<z.ZodObject<{
116
+ baseUrl: z.ZodDefault<z.ZodString>;
117
+ model: z.ZodDefault<z.ZodString>;
118
+ timeout: z.ZodDefault<z.ZodNumber>;
119
+ }, "strip", z.ZodTypeAny, {
120
+ model: string;
121
+ timeout: number;
122
+ baseUrl: string;
123
+ }, {
124
+ model?: string | undefined;
125
+ timeout?: number | undefined;
126
+ baseUrl?: string | undefined;
127
+ }>>;
128
+ mistral: z.ZodOptional<z.ZodObject<{
129
+ apiKey: z.ZodOptional<z.ZodString>;
130
+ model: z.ZodDefault<z.ZodString>;
131
+ }, "strip", z.ZodTypeAny, {
132
+ model: string;
133
+ apiKey?: string | undefined;
134
+ }, {
135
+ model?: string | undefined;
136
+ apiKey?: string | undefined;
137
+ }>>;
115
138
  }, "strip", z.ZodTypeAny, {
116
139
  bedrock?: {
117
140
  model: string;
@@ -152,6 +175,15 @@ declare const ConfigSchema: z.ZodObject<{
152
175
  model: string;
153
176
  apiKey?: string | undefined;
154
177
  } | undefined;
178
+ ollama?: {
179
+ model: string;
180
+ timeout: number;
181
+ baseUrl: string;
182
+ } | undefined;
183
+ mistral?: {
184
+ model: string;
185
+ apiKey?: string | undefined;
186
+ } | undefined;
155
187
  }, {
156
188
  bedrock?: {
157
189
  model?: string | undefined;
@@ -192,6 +224,15 @@ declare const ConfigSchema: z.ZodObject<{
192
224
  model?: string | undefined;
193
225
  apiKey?: string | undefined;
194
226
  } | undefined;
227
+ ollama?: {
228
+ model?: string | undefined;
229
+ timeout?: number | undefined;
230
+ baseUrl?: string | undefined;
231
+ } | undefined;
232
+ mistral?: {
233
+ model?: string | undefined;
234
+ apiKey?: string | undefined;
235
+ } | undefined;
195
236
  }>>;
196
237
  profiles: z.ZodDefault<z.ZodRecord<z.ZodString, z.ZodAny>>;
197
238
  preferences: z.ZodDefault<z.ZodObject<{
@@ -217,7 +258,7 @@ declare const ConfigSchema: z.ZodObject<{
217
258
  cacheStrategy?: "file" | "memory" | "redis" | undefined;
218
259
  }>>;
219
260
  }, "strip", z.ZodTypeAny, {
220
- defaultProvider: "bedrock" | "openai" | "vertex" | "anthropic" | "azure" | "google-ai" | "auto" | "huggingface";
261
+ defaultProvider: "bedrock" | "openai" | "vertex" | "anthropic" | "azure" | "google-ai" | "huggingface" | "ollama" | "mistral" | "auto";
221
262
  providers: {
222
263
  bedrock?: {
223
264
  model: string;
@@ -258,6 +299,15 @@ declare const ConfigSchema: z.ZodObject<{
258
299
  model: string;
259
300
  apiKey?: string | undefined;
260
301
  } | undefined;
302
+ ollama?: {
303
+ model: string;
304
+ timeout: number;
305
+ baseUrl: string;
306
+ } | undefined;
307
+ mistral?: {
308
+ model: string;
309
+ apiKey?: string | undefined;
310
+ } | undefined;
261
311
  };
262
312
  profiles: Record<string, any>;
263
313
  preferences: {
@@ -269,7 +319,7 @@ declare const ConfigSchema: z.ZodObject<{
269
319
  cacheStrategy: "file" | "memory" | "redis";
270
320
  };
271
321
  }, {
272
- defaultProvider?: "bedrock" | "openai" | "vertex" | "anthropic" | "azure" | "google-ai" | "auto" | "huggingface" | undefined;
322
+ defaultProvider?: "bedrock" | "openai" | "vertex" | "anthropic" | "azure" | "google-ai" | "huggingface" | "ollama" | "mistral" | "auto" | undefined;
273
323
  providers?: {
274
324
  bedrock?: {
275
325
  model?: string | undefined;
@@ -310,6 +360,15 @@ declare const ConfigSchema: z.ZodObject<{
310
360
  model?: string | undefined;
311
361
  apiKey?: string | undefined;
312
362
  } | undefined;
363
+ ollama?: {
364
+ model?: string | undefined;
365
+ timeout?: number | undefined;
366
+ baseUrl?: string | undefined;
367
+ } | undefined;
368
+ mistral?: {
369
+ model?: string | undefined;
370
+ apiKey?: string | undefined;
371
+ } | undefined;
313
372
  } | undefined;
314
373
  profiles?: Record<string, any> | undefined;
315
374
  preferences?: {
@@ -375,6 +434,14 @@ export declare class ConfigManager {
375
434
  * Hugging Face provider setup
376
435
  */
377
436
  private setupHuggingFace;
437
+ /**
438
+ * Ollama provider setup
439
+ */
440
+ private setupOllama;
441
+ /**
442
+ * Mistral AI provider setup
443
+ */
444
+ private setupMistral;
378
445
  /**
379
446
  * Get current configuration
380
447
  */
@@ -13,7 +13,7 @@ import chalk from 'chalk';
13
13
  import { z } from 'zod';
14
14
  // Configuration schema for validation
15
15
  const ConfigSchema = z.object({
16
- defaultProvider: z.enum(['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface']).default('auto'),
16
+ defaultProvider: z.enum(['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral']).default('auto'),
17
17
  providers: z.object({
18
18
  openai: z.object({
19
19
  apiKey: z.string().optional(),
@@ -53,6 +53,15 @@ const ConfigSchema = z.object({
53
53
  huggingface: z.object({
54
54
  apiKey: z.string().optional(),
55
55
  model: z.string().default('microsoft/DialoGPT-large')
56
+ }).optional(),
57
+ ollama: z.object({
58
+ baseUrl: z.string().default('http://localhost:11434'),
59
+ model: z.string().default('llama2'),
60
+ timeout: z.number().default(60000)
61
+ }).optional(),
62
+ mistral: z.object({
63
+ apiKey: z.string().optional(),
64
+ model: z.string().default('mistral-small')
56
65
  }).optional()
57
66
  }).default({}),
58
67
  profiles: z.record(z.string(), z.any()).default({}),
@@ -128,7 +137,8 @@ export class ConfigManager {
128
137
  { name: 'Anthropic - Claude models (direct)', value: 'anthropic' },
129
138
  { name: 'Azure OpenAI - Enterprise GPT', value: 'azure' },
130
139
  { name: 'Google AI Studio - Gemini models (direct)', value: 'google-ai' },
131
- { name: 'Hugging Face - Open source models', value: 'huggingface' }
140
+ { name: 'Hugging Face - Open source models', value: 'huggingface' },
141
+ { name: 'Mistral AI - European AI with competitive pricing', value: 'mistral' }
132
142
  ],
133
143
  default: this.config.defaultProvider
134
144
  },
@@ -190,7 +200,9 @@ export class ConfigManager {
190
200
  { name: 'Anthropic Direct (Claude)', value: 'anthropic' },
191
201
  { name: 'Azure OpenAI (Enterprise)', value: 'azure' },
192
202
  { name: 'Google AI Studio (Gemini Direct)', value: 'google-ai' },
193
- { name: 'Hugging Face (Open Source)', value: 'huggingface' }
203
+ { name: 'Hugging Face (Open Source)', value: 'huggingface' },
204
+ { name: 'Ollama (Local AI Models)', value: 'ollama' },
205
+ { name: 'Mistral AI (European AI)', value: 'mistral' }
194
206
  ]
195
207
  }
196
208
  ]);
@@ -225,6 +237,12 @@ export class ConfigManager {
225
237
  case 'huggingface':
226
238
  await this.setupHuggingFace();
227
239
  break;
240
+ case 'ollama':
241
+ await this.setupOllama();
242
+ break;
243
+ case 'mistral':
244
+ await this.setupMistral();
245
+ break;
228
246
  }
229
247
  }
230
248
  /**
@@ -496,6 +514,60 @@ export class ConfigManager {
496
514
  ]);
497
515
  this.config.providers.huggingface = answers;
498
516
  }
517
+ /**
518
+ * Ollama provider setup
519
+ */
520
+ async setupOllama() {
521
+ const answers = await inquirer.prompt([
522
+ {
523
+ type: 'input',
524
+ name: 'baseUrl',
525
+ message: 'Ollama base URL:',
526
+ default: 'http://localhost:11434',
527
+ validate: (value) => value.startsWith('http') || 'URL should start with http:// or https://'
528
+ },
529
+ {
530
+ type: 'input',
531
+ name: 'model',
532
+ message: 'Default model:',
533
+ default: 'llama2'
534
+ },
535
+ {
536
+ type: 'number',
537
+ name: 'timeout',
538
+ message: 'Request timeout (milliseconds):',
539
+ default: 60000,
540
+ validate: (value) => value > 0 || 'Timeout must be positive'
541
+ }
542
+ ]);
543
+ this.config.providers.ollama = answers;
544
+ }
545
+ /**
546
+ * Mistral AI provider setup
547
+ */
548
+ async setupMistral() {
549
+ const answers = await inquirer.prompt([
550
+ {
551
+ type: 'password',
552
+ name: 'apiKey',
553
+ message: 'Mistral AI API Key:',
554
+ validate: (value) => value.length > 0 || 'API key is required'
555
+ },
556
+ {
557
+ type: 'list',
558
+ name: 'model',
559
+ message: 'Default model:',
560
+ choices: [
561
+ 'mistral-small',
562
+ 'mistral-medium',
563
+ 'mistral-large',
564
+ 'mistral-tiny'
565
+ ],
566
+ default: 'mistral-small'
567
+ }
568
+ ]);
569
+ this.config.providers.mistral = answers;
570
+ }
499
571
  /**
500
572
  * Get current configuration
501
573
  */
@@ -0,0 +1,8 @@
1
+ import type { Argv } from 'yargs';
2
+ export declare const ollamaCommand: {
3
+ command: string;
4
+ describe: string;
5
+ builder: (yargs: Argv) => Argv<{}>;
6
+ handler: () => void;
7
+ };
8
+ export default ollamaCommand;