@juspay/neurolink 1.5.2 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +87 -0
- package/README.md +17 -7
- package/dist/cli/commands/config.d.ts +70 -3
- package/dist/cli/commands/config.js +75 -3
- package/dist/cli/commands/ollama.d.ts +8 -0
- package/dist/cli/commands/ollama.js +323 -0
- package/dist/cli/index.js +13 -15
- package/dist/core/factory.js +17 -2
- package/dist/core/types.d.ts +4 -1
- package/dist/core/types.js +3 -0
- package/dist/lib/core/factory.js +17 -2
- package/dist/lib/core/types.d.ts +4 -1
- package/dist/lib/core/types.js +3 -0
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +13 -9
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +250 -152
- package/dist/lib/neurolink.d.ts +2 -2
- package/dist/lib/neurolink.js +18 -8
- package/dist/lib/providers/huggingFace.d.ts +31 -0
- package/dist/lib/providers/huggingFace.js +355 -0
- package/dist/lib/providers/index.d.ts +6 -0
- package/dist/lib/providers/index.js +7 -1
- package/dist/lib/providers/mistralAI.d.ts +32 -0
- package/dist/lib/providers/mistralAI.js +217 -0
- package/dist/lib/providers/ollama.d.ts +51 -0
- package/dist/lib/providers/ollama.js +493 -0
- package/dist/lib/utils/providerUtils.js +17 -2
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-core-server.js +13 -9
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +248 -152
- package/dist/neurolink.d.ts +2 -2
- package/dist/neurolink.js +18 -8
- package/dist/providers/huggingFace.d.ts +31 -0
- package/dist/providers/huggingFace.js +355 -0
- package/dist/providers/index.d.ts +6 -0
- package/dist/providers/index.js +7 -1
- package/dist/providers/mistralAI.d.ts +32 -0
- package/dist/providers/mistralAI.js +217 -0
- package/dist/providers/ollama.d.ts +51 -0
- package/dist/providers/ollama.js +493 -0
- package/dist/utils/providerUtils.js +17 -2
- package/package.json +161 -151
package/dist/cli/commands/ollama.js
ADDED
@@ -0,0 +1,323 @@
+ import { execSync } from 'child_process';
+ import chalk from 'chalk';
+ import ora from 'ora';
+ import inquirer from 'inquirer';
+ export const ollamaCommand = {
+     command: 'ollama <command>',
+     describe: 'Manage Ollama local AI models',
+     builder: (yargs) => {
+         return yargs
+             .command('list-models', 'List installed Ollama models', {}, listModelsHandler)
+             .command('pull <model>', 'Download an Ollama model', {
+                 model: {
+                     describe: 'Model name to download',
+                     type: 'string',
+                     demandOption: true
+                 }
+             }, pullModelHandler)
+             .command('remove <model>', 'Remove an Ollama model', {
+                 model: {
+                     describe: 'Model name to remove',
+                     type: 'string',
+                     demandOption: true
+                 }
+             }, removeModelHandler)
+             .command('status', 'Check Ollama service status', {}, statusHandler)
+             .command('start', 'Start Ollama service', {}, startHandler)
+             .command('stop', 'Stop Ollama service', {}, stopHandler)
+             .command('setup', 'Interactive Ollama setup', {}, setupHandler)
+             .demandCommand(1, 'Please specify a command');
+     },
+     handler: () => { } // No-op handler as subcommands handle everything
+ };
+ async function listModelsHandler() {
+     const spinner = ora('Fetching installed models...').start();
+     try {
+         const output = execSync('ollama list', { encoding: 'utf8' });
+         spinner.succeed('Installed models:');
+         if (output.trim()) {
+             console.log(output);
+         }
+         else {
+             console.log(chalk.yellow('No models installed. Use "neurolink ollama pull <model>" to download a model.'));
+         }
+     }
+     catch (error) {
+         spinner.fail('Failed to list models. Is Ollama installed?');
+         console.error(chalk.red('Error:', error.message));
+         console.log(chalk.blue('\nTip: Install Ollama from https://ollama.ai'));
+         process.exit(1);
+     }
+ }
+ async function pullModelHandler(argv) {
+     const { model } = argv;
+     console.log(chalk.blue(`Downloading model: ${model}`));
+     console.log(chalk.gray('This may take several minutes...'));
+     try {
+         execSync(`ollama pull ${model}`, { stdio: 'inherit' });
+         console.log(chalk.green(`\n✅ Successfully downloaded ${model}`));
+         console.log(chalk.blue(`\nTest it with: npx @juspay/neurolink generate-text "Hello!" --provider ollama --model ${model}`));
+     }
+     catch (error) {
+         console.error(chalk.red(`\n❌ Failed to download ${model}`));
+         console.error(chalk.red('Error:', error.message));
+         process.exit(1);
+     }
+ }
+ async function removeModelHandler(argv) {
+     const { model } = argv;
+     // Confirm removal
+     const { confirm } = await inquirer.prompt([
+         {
+             type: 'confirm',
+             name: 'confirm',
+             message: `Are you sure you want to remove model "${model}"?`,
+             default: false
+         }
+     ]);
+     if (!confirm) {
+         console.log(chalk.yellow('Removal cancelled.'));
+         return;
+     }
+     const spinner = ora(`Removing model ${model}...`).start();
+     try {
+         execSync(`ollama rm ${model}`, { encoding: 'utf8' });
+         spinner.succeed(`Successfully removed ${model}`);
+     }
+     catch (error) {
+         spinner.fail(`Failed to remove ${model}`);
+         console.error(chalk.red('Error:', error.message));
+         process.exit(1);
+     }
+ }
+ async function statusHandler() {
+     const spinner = ora('Checking Ollama service status...').start();
+     try {
+         // Try to run a simple command
+         execSync('ollama list', { encoding: 'utf8' });
+         spinner.succeed('Ollama service is running');
+         // Get additional info
+         try {
+             const response = execSync('curl -s http://localhost:11434/api/tags', { encoding: 'utf8' });
+             const data = JSON.parse(response);
+             if (data.models && data.models.length > 0) {
+                 console.log(chalk.green(`\n${data.models.length} models available`));
+             }
+         }
+         catch {
+             // Curl might not be available, that's ok
+         }
+     }
+     catch (error) {
+         spinner.fail('Ollama service is not running');
+         console.log(chalk.yellow('\nStart Ollama with: ollama serve'));
+         console.log(chalk.blue('Or restart the Ollama app if using the desktop version'));
+         process.exit(1);
+     }
+ }
+ async function startHandler() {
+     console.log(chalk.blue('Starting Ollama service...'));
+     try {
+         // Check if already running
+         try {
+             execSync('ollama list', { encoding: 'utf8' });
+             console.log(chalk.yellow('Ollama service is already running!'));
+             return;
+         }
+         catch {
+             // Not running, continue to start
+         }
+         // Different approaches for different platforms
+         if (process.platform === 'darwin') {
+             // macOS
+             console.log(chalk.gray('Starting Ollama on macOS...'));
+             try {
+                 execSync('open -a Ollama');
+                 console.log(chalk.green('✅ Ollama app started'));
+             }
+             catch {
+                 // Try service command
+                 execSync('ollama serve > /dev/null 2>&1 &', { stdio: 'ignore' });
+                 console.log(chalk.green('✅ Ollama service started'));
+             }
+         }
+         else if (process.platform === 'linux') {
+             // Linux
+             console.log(chalk.gray('Starting Ollama service on Linux...'));
+             try {
+                 execSync('systemctl start ollama', { encoding: 'utf8' });
+                 console.log(chalk.green('✅ Ollama service started'));
+             }
+             catch {
+                 // Try direct command
+                 execSync('ollama serve > /dev/null 2>&1 &', { stdio: 'ignore' });
+                 console.log(chalk.green('✅ Ollama service started'));
+             }
+         }
+         else {
+             // Windows
+             console.log(chalk.gray('Starting Ollama on Windows...'));
+             execSync('start ollama serve', { stdio: 'ignore' });
+             console.log(chalk.green('✅ Ollama service started'));
+         }
+         console.log(chalk.blue('\nWait a few seconds for the service to initialize...'));
+     }
+     catch (error) {
+         console.error(chalk.red('Failed to start Ollama service'));
+         console.error(chalk.red('Error:', error.message));
+         console.log(chalk.blue('\nTry starting Ollama manually or check installation'));
+         process.exit(1);
+     }
+ }
+ async function stopHandler() {
+     const spinner = ora('Stopping Ollama service...').start();
+     try {
+         if (process.platform === 'darwin') {
+             // macOS
+             try {
+                 execSync('pkill ollama', { encoding: 'utf8' });
+             }
+             catch {
+                 execSync('killall Ollama', { encoding: 'utf8' });
+             }
+         }
+         else if (process.platform === 'linux') {
+             // Linux
+             try {
+                 execSync('systemctl stop ollama', { encoding: 'utf8' });
+             }
+             catch {
+                 execSync('pkill ollama', { encoding: 'utf8' });
+             }
+         }
+         else {
+             // Windows
+             execSync('taskkill /F /IM ollama.exe', { encoding: 'utf8' });
+         }
+         spinner.succeed('Ollama service stopped');
+     }
+     catch (error) {
+         spinner.fail('Failed to stop Ollama service');
+         console.error(chalk.red('It may not be running or requires manual stop'));
+     }
+ }
+ async function setupHandler() {
+     console.log(chalk.blue('🦙 Welcome to Ollama Setup!\n'));
+     // Check if Ollama is installed
+     const checkSpinner = ora('Checking Ollama installation...').start();
+     let isInstalled = false;
+     try {
+         execSync('ollama --version', { encoding: 'utf8' });
+         isInstalled = true;
+         checkSpinner.succeed('Ollama is installed');
+     }
+     catch {
+         checkSpinner.fail('Ollama is not installed');
+     }
+     if (!isInstalled) {
+         console.log(chalk.yellow('\nOllama needs to be installed first.'));
+         console.log(chalk.blue('\nInstallation instructions:'));
+         if (process.platform === 'darwin') {
+             console.log('\nFor macOS:');
+             console.log(chalk.gray(' brew install ollama'));
+             console.log(chalk.gray(' # or download from https://ollama.ai'));
+         }
+         else if (process.platform === 'linux') {
+             console.log('\nFor Linux:');
+             console.log(chalk.gray(' curl -fsSL https://ollama.ai/install.sh | sh'));
+         }
+         else {
+             console.log('\nFor Windows:');
+             console.log(chalk.gray(' Download from https://ollama.ai'));
+         }
+         const { proceedAnyway } = await inquirer.prompt([
+             {
+                 type: 'confirm',
+                 name: 'proceedAnyway',
+                 message: 'Would you like to continue with setup anyway?',
+                 default: false
+             }
+         ]);
+         if (!proceedAnyway) {
+             console.log(chalk.blue('\nInstall Ollama and run setup again!'));
+             return;
+         }
+     }
+     // Check if service is running
+     let serviceRunning = false;
+     try {
+         execSync('ollama list', { encoding: 'utf8' });
+         serviceRunning = true;
+         console.log(chalk.green('\n✅ Ollama service is running'));
+     }
+     catch {
+         console.log(chalk.yellow('\n⚠️ Ollama service is not running'));
+         const { startService } = await inquirer.prompt([
+             {
+                 type: 'confirm',
+                 name: 'startService',
+                 message: 'Would you like to start the Ollama service?',
+                 default: true
+             }
+         ]);
+         if (startService) {
+             await startHandler();
+             serviceRunning = true;
+         }
+     }
+     if (serviceRunning) {
+         // List available models
+         console.log(chalk.blue('\n📦 Popular Ollama models:'));
+         console.log(' • llama2 (7B) - General purpose');
+         console.log(' • codellama (7B) - Code generation');
+         console.log(' • mistral (7B) - Fast and efficient');
+         console.log(' • tinyllama (1B) - Lightweight');
+         console.log(' • phi (2.7B) - Microsoft\'s compact model');
+         const { downloadModel } = await inquirer.prompt([
+             {
+                 type: 'confirm',
+                 name: 'downloadModel',
+                 message: 'Would you like to download a model?',
+                 default: true
+             }
+         ]);
+         if (downloadModel) {
+             const { selectedModel } = await inquirer.prompt([
+                 {
+                     type: 'list',
+                     name: 'selectedModel',
+                     message: 'Select a model to download:',
+                     choices: [
+                         { name: 'llama2 (7B) - Recommended for general use', value: 'llama2' },
+                         { name: 'codellama (7B) - Best for code generation', value: 'codellama' },
+                         { name: 'mistral (7B) - Fast and efficient', value: 'mistral' },
+                         { name: 'tinyllama (1B) - Lightweight, fast', value: 'tinyllama' },
+                         { name: 'phi (2.7B) - Microsoft\'s compact model', value: 'phi' },
+                         { name: 'Other (enter manually)', value: 'other' }
+                     ]
+                 }
+             ]);
+             let modelToDownload = selectedModel;
+             if (selectedModel === 'other') {
+                 const { customModel } = await inquirer.prompt([
+                     {
+                         type: 'input',
+                         name: 'customModel',
+                         message: 'Enter the model name:',
+                         validate: (input) => input.trim().length > 0 || 'Model name is required'
+                     }
+                 ]);
+                 modelToDownload = customModel;
+             }
+             await pullModelHandler({ model: modelToDownload });
+         }
+     }
+     // Final instructions
+     console.log(chalk.green('\n✅ Setup complete!\n'));
+     console.log(chalk.blue('Next steps:'));
+     console.log('1. List models: ' + chalk.gray('neurolink ollama list-models'));
+     console.log('2. Generate text: ' + chalk.gray('neurolink generate-text "Hello!" --provider ollama'));
+     console.log('3. Use specific model: ' + chalk.gray('neurolink generate-text "Hello!" --provider ollama --model codellama'));
+     console.log(chalk.gray('\nFor more information, see: https://docs.neurolink.ai/providers/ollama'));
+ }
+ export default ollamaCommand;
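The module above follows the standard yargs command-module shape (command / describe / builder / handler), with each subcommand shelling out to the local `ollama` binary via `execSync`. A minimal sketch of how a module of this shape is registered and dispatched, independent of the NeuroLink CLI (the file and command names here are illustrative, not part of the package):

// demo-cli.mjs — illustrative sketch only; mirrors the ollamaCommand module pattern.
import yargs from 'yargs';
import { hideBin } from 'yargs/helpers';

const demoCommand = {
    command: 'demo <command>',
    describe: 'Example command module in the same shape as ollamaCommand',
    builder: (y) => y
        .command('ping', 'Print a test message', {}, () => console.log('pong'))
        .demandCommand(1, 'Please specify a command'),
    handler: () => { } // no-op: subcommands do the work, exactly as in ollamaCommand
};

// Registering the module object is all the host CLI needs to do
// (dist/cli/index.js does the same with cli.command(ollamaCommand), shown below).
await yargs(hideBin(process.argv))
    .command(demoCommand)
    .demandCommand(1)
    .parseAsync();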
package/dist/cli/index.js
CHANGED
@@ -7,14 +7,13 @@
   * Implementation: ~300 lines using simple JS utility functions
   */
  import { NeuroLink } from '../lib/neurolink.js';
- import yargs from 'yargs';
+ import yargs from 'yargs';
  import { hideBin } from 'yargs/helpers';
  import ora from 'ora';
  import chalk from 'chalk';
  import fs from 'fs';
- import { fileURLToPath } from 'url';
- import { dirname } from 'path';
  import { addMCPCommands } from './commands/mcp.js';
+ import ollamaCommand from './commands/ollama.js';
  // Load environment variables from .env file
  try {
      // Try to import and configure dotenv
@@ -25,9 +24,6 @@ catch (error) {
      // dotenv is not available (dev dependency only) - this is fine for production
      // Environment variables should be set externally in production
  }
- // Get current directory for ESM
- const __filename = fileURLToPath(import.meta.url);
- const __dirname = dirname(__filename);
  // Utility Functions (Simple, Zero Maintenance)
  function formatOutput(result, format = 'text') {
      if (format === 'json') {
@@ -156,8 +152,8 @@ const cli = yargs(args)
      if (argv.debug) {
          process.env.NEUROLINK_DEBUG = 'true';
      }
-     else
-     //
+     else {
+         // Always set to false when debug is not enabled (including when not provided)
          process.env.NEUROLINK_DEBUG = 'false';
      }
      // Keep existing quiet middleware
@@ -224,7 +220,7 @@ const cli = yargs(args)
      demandOption: true,
  })
      .option('provider', {
-         choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'],
+         choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'],
          default: 'auto',
          description: 'AI provider to use (auto-selects best available)'
  })
@@ -306,7 +302,7 @@ const cli = yargs(args)
  .command('stream <prompt>', 'Stream text generation in real-time', (yargsInstance) => yargsInstance
      .usage('Usage: $0 stream <prompt> [options]')
      .positional('prompt', { type: 'string', description: 'Text prompt for streaming', demandOption: true })
-     .option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'], default: 'auto', description: 'AI provider to use' })
+     .option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'], default: 'auto', description: 'AI provider to use' })
      .option('temperature', { type: 'number', default: 0.7, description: 'Creativity level' })
      .option('debug', { type: 'boolean', default: false, description: 'Enable debug mode with interleaved logging' })
      .example('$0 stream "Tell me a story"', 'Stream a story in real-time'), async (argv) => {
@@ -342,7 +338,7 @@ const cli = yargs(args)
      .positional('file', { type: 'string', description: 'File with prompts (one per line)', demandOption: true })
      .option('output', { type: 'string', description: 'Output file for results (default: stdout)' })
      .option('delay', { type: 'number', default: 1000, description: 'Delay between requests in milliseconds' })
-     .option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'], default: 'auto', description: 'AI provider to use' })
+     .option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'], default: 'auto', description: 'AI provider to use' })
      .option('timeout', { type: 'number', default: 30000, description: 'Timeout for each request in milliseconds' })
      .option('temperature', { type: 'number', description: 'Global temperature for batch jobs' })
      .option('max-tokens', { type: 'number', description: 'Global max tokens for batch jobs' })
@@ -423,7 +419,7 @@ const cli = yargs(args)
      const spinner = argv.quiet ? null : ora('🔍 Checking AI provider status...\n').start();
      // Middleware sets argv.verbose if NEUROLINK_DEBUG is true and --verbose is not specified
      // Removed the spinner.stopAndPersist logic from here as it's handled before spinner start
-     const providers = ['openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'];
+     const providers = ['openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'];
      const results = [];
      for (const p of providers) {
          if (spinner)
@@ -457,13 +453,13 @@ const cli = yargs(args)
      }
  })
  .command('list', 'List available AI providers', (y) => y.usage('Usage: $0 provider list'), async () => {
-     console.log('Available providers: openai, bedrock, vertex, anthropic, azure, google-ai');
+     console.log('Available providers: openai, bedrock, vertex, anthropic, azure, google-ai, huggingface, ollama, mistral');
  })
  .command('configure <providerName>', 'Display configuration guidance for a provider', (y) => y
      .usage('Usage: $0 provider configure <providerName>')
      .positional('providerName', {
          type: 'string',
-         choices: ['openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'],
+         choices: ['openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'],
          description: 'Name of the provider to configure',
          demandOption: true,
  })
@@ -495,7 +491,7 @@ const cli = yargs(args)
      const spinner = argv.quiet ? null : ora('🔍 Checking AI provider status...\n').start();
      // Middleware sets argv.verbose if NEUROLINK_DEBUG is true and --verbose is not specified
      // Removed the spinner.stopAndPersist logic from here as it's handled before spinner start
-     const providers = ['openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai'];
+     const providers = ['openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'];
      const results = [];
      for (const p of providers) {
          if (spinner)
@@ -615,6 +611,8 @@ const cli = yargs(args)
      .completion('completion', 'Generate shell completion script');
  // Add MCP commands
  addMCPCommands(cli);
+ // Add Ollama command
+ cli.command(ollamaCommand);
  // Use an async IIFE to allow top-level await for parseAsync
  (async () => {
      try {
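Most of the index.js changes are mechanical: the `--provider` choices arrays grow to include `huggingface`, `ollama`, and `mistral`. That matters because yargs enforces `choices` at parse time, so the new providers were unreachable from the CLI until the lists were extended. A small standalone illustration (not NeuroLink code; the option definition is copied from the hunk above):

// yargs-choices.mjs — sketch of how the widened choices list behaves.
import yargs from 'yargs';

const argv = yargs(['--provider', 'ollama'])
    .option('provider', {
        choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure', 'google-ai', 'huggingface', 'ollama', 'mistral'],
        default: 'auto',
        description: 'AI provider to use (auto-selects best available)'
    })
    .parseSync();

console.log(argv.provider); // "ollama" — accepted only because it is in the choices list;
                            // with the 1.5.2 list, yargs would reject the value and exit.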
package/dist/core/factory.js
CHANGED
@@ -1,4 +1,4 @@
- import { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, GoogleAIStudio } from '../providers/index.js';
+ import { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, GoogleAIStudio, HuggingFace, Ollama, MistralAI } from '../providers/index.js';
  import { getBestProvider } from '../utils/providerUtils.js';
  import { logger } from '../utils/logger.js';
  const componentIdentifier = 'aiProviderFactory';
@@ -47,8 +47,23 @@ export class AIProviderFactory {
          case 'google-studio':
              provider = new GoogleAIStudio(modelName);
              break;
+         case 'huggingface':
+         case 'hugging-face':
+         case 'hf':
+             provider = new HuggingFace(modelName);
+             break;
+         case 'ollama':
+         case 'local':
+         case 'local-ollama':
+             provider = new Ollama(modelName || undefined);
+             break;
+         case 'mistral':
+         case 'mistral-ai':
+         case 'mistralai':
+             provider = new MistralAI(modelName);
+             break;
          default:
-             throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai, anthropic, azure, google-ai`);
+             throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai, anthropic, azure, google-ai, huggingface, ollama, mistral`);
      }
      logger.debug(`[${functionTag}] Provider creation succeeded`, {
          providerName,
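The new `case` groups give each provider several accepted spellings (`hf`, `local`, `mistralai`, and so on) that all resolve to the same class. The factory method containing this switch is not named in the hunk, so the sketch below only reimplements the alias mapping as a standalone helper; the function name and the string return values are illustrative, not a NeuroLink API:

// provider-alias.mjs — sketch of the alias normalization added in this diff.
// The real factory instantiates HuggingFace / Ollama / MistralAI classes instead
// of returning strings; this helper is hypothetical and for illustration only.
function canonicalProviderName(providerName) {
    switch (providerName) {
        case 'huggingface':
        case 'hugging-face':
        case 'hf':
            return 'huggingface';
        case 'ollama':
        case 'local':
        case 'local-ollama':
            return 'ollama';
        case 'mistral':
        case 'mistral-ai':
        case 'mistralai':
            return 'mistral';
        default:
            throw new Error(`Unknown provider: ${providerName}`);
    }
}

console.log(canonicalProviderName('hf'));           // "huggingface"
console.log(canonicalProviderName('local-ollama')); // "ollama"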
package/dist/core/types.d.ts
CHANGED
@@ -9,7 +9,10 @@ export declare enum AIProviderName {
      VERTEX = "vertex",
      ANTHROPIC = "anthropic",
      AZURE = "azure",
-     GOOGLE_AI = "google-ai"
+     GOOGLE_AI = "google-ai",
+     HUGGINGFACE = "huggingface",
+     OLLAMA = "ollama",
+     MISTRAL = "mistral"
  }
  /**
   * Supported Models for Amazon Bedrock
package/dist/core/types.js
CHANGED
@@ -9,6 +9,9 @@ export var AIProviderName;
      AIProviderName["ANTHROPIC"] = "anthropic";
      AIProviderName["AZURE"] = "azure";
      AIProviderName["GOOGLE_AI"] = "google-ai";
+     AIProviderName["HUGGINGFACE"] = "huggingface";
+     AIProviderName["OLLAMA"] = "ollama";
+     AIProviderName["MISTRAL"] = "mistral";
  })(AIProviderName || (AIProviderName = {}));
  /**
   * Supported Models for Amazon Bedrock
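Since the compiled enum is a plain object keyed by member name, the three new members are ordinary string values. A quick sketch (the import path is a guess for illustration; this diff shows only the file contents, not how the package exposes them):

// enum-usage.mjs — sketch; import path assumed, adjust to the package's actual exports.
import { AIProviderName } from '@juspay/neurolink/dist/core/types.js';

console.log(AIProviderName.HUGGINGFACE); // "huggingface"
console.log(AIProviderName.OLLAMA);      // "ollama"
console.log(AIProviderName.MISTRAL);     // "mistral"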
package/dist/lib/core/factory.js
CHANGED
@@ -1,4 +1,4 @@
- import { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, GoogleAIStudio } from '../providers/index.js';
+ import { GoogleVertexAI, AmazonBedrock, OpenAI, AnthropicProvider, AzureOpenAIProvider, GoogleAIStudio, HuggingFace, Ollama, MistralAI } from '../providers/index.js';
  import { getBestProvider } from '../utils/providerUtils.js';
  import { logger } from '../utils/logger.js';
  const componentIdentifier = 'aiProviderFactory';
@@ -47,8 +47,23 @@ export class AIProviderFactory {
          case 'google-studio':
              provider = new GoogleAIStudio(modelName);
              break;
+         case 'huggingface':
+         case 'hugging-face':
+         case 'hf':
+             provider = new HuggingFace(modelName);
+             break;
+         case 'ollama':
+         case 'local':
+         case 'local-ollama':
+             provider = new Ollama(modelName || undefined);
+             break;
+         case 'mistral':
+         case 'mistral-ai':
+         case 'mistralai':
+             provider = new MistralAI(modelName);
+             break;
          default:
-             throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai, anthropic, azure, google-ai`);
+             throw new Error(`Unknown provider: ${providerName}. Supported providers: vertex, bedrock, openai, anthropic, azure, google-ai, huggingface, ollama, mistral`);
      }
      logger.debug(`[${functionTag}] Provider creation succeeded`, {
          providerName,
package/dist/lib/core/types.d.ts
CHANGED
@@ -9,7 +9,10 @@ export declare enum AIProviderName {
      VERTEX = "vertex",
      ANTHROPIC = "anthropic",
      AZURE = "azure",
-     GOOGLE_AI = "google-ai"
+     GOOGLE_AI = "google-ai",
+     HUGGINGFACE = "huggingface",
+     OLLAMA = "ollama",
+     MISTRAL = "mistral"
  }
  /**
   * Supported Models for Amazon Bedrock
package/dist/lib/core/types.js
CHANGED
@@ -9,6 +9,9 @@ export var AIProviderName;
      AIProviderName["ANTHROPIC"] = "anthropic";
      AIProviderName["AZURE"] = "azure";
      AIProviderName["GOOGLE_AI"] = "google-ai";
+     AIProviderName["HUGGINGFACE"] = "huggingface";
+     AIProviderName["OLLAMA"] = "ollama";
+     AIProviderName["MISTRAL"] = "mistral";
  })(AIProviderName || (AIProviderName = {}));
  /**
   * Supported Models for Amazon Bedrock
package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js
CHANGED
@@ -12,12 +12,12 @@ import { getBestProvider, getAvailableProviders } from '../../../utils/providerU
  const AnalyzeUsageSchema = z.object({
      sessionId: z.string().optional(),
      timeRange: z.enum(['1h', '24h', '7d', '30d']).default('24h'),
-     provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai']).optional(),
+     provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai', 'azure', 'huggingface', 'ollama', 'mistral']).optional(),
      includeTokenBreakdown: z.boolean().default(true),
      includeCostEstimation: z.boolean().default(true)
  });
  const BenchmarkSchema = z.object({
-     providers: z.array(z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai'])).optional(),
+     providers: z.array(z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai', 'azure', 'huggingface', 'ollama', 'mistral'])).optional(),
      testPrompts: z.array(z.string()).optional(),
      iterations: z.number().min(1).max(5).default(2),
      metrics: z.array(z.enum(['latency', 'quality', 'cost', 'tokens'])).default(['latency', 'quality']),
@@ -25,7 +25,7 @@ const BenchmarkSchema = z.object({
  });
  const OptimizeParametersSchema = z.object({
      prompt: z.string().min(1, 'Prompt is required for optimization'),
-     provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai']).optional(),
+     provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai', 'azure', 'huggingface', 'ollama', 'mistral']).optional(),
      targetLength: z.number().positive().optional(),
      style: z.enum(['creative', 'balanced', 'precise', 'factual']).default('balanced'),
      optimizeFor: z.enum(['speed', 'quality', 'cost', 'tokens']).default('quality'),
@@ -59,7 +59,7 @@ export const analyzeAIUsageTool = {

  Generate a realistic analysis including:
  1. A summary of usage statistics (totalRequests, totalTokens).
- 2. A breakdown of usage by provider (OpenAI, Bedrock, Vertex).
+ 2. A breakdown of usage by provider (OpenAI, Bedrock, Vertex, Google AI, Anthropic, Azure, Hugging Face, Ollama, Mistral).
  3. Key insights and actionable recommendations for cost and performance optimization.

  Return the result as a valid JSON object with keys: "analysis", "insights".
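These schema edits simply widen the zod provider enums (and add the previously missing `azure`), which is what lets the MCP tools accept the new providers: zod rejects any value outside a `z.enum` list at validation time. A standalone sketch using only zod:

// zod-provider.mjs — sketch of the widened provider enum under validation.
import { z } from 'zod';

const ProviderSchema = z.enum([
    'openai', 'bedrock', 'vertex', 'anthropic', 'google-ai',
    'azure', 'huggingface', 'ollama', 'mistral'
]).optional();

console.log(ProviderSchema.safeParse('ollama').success);    // true — accepted after this change
console.log(ProviderSchema.safeParse('unknown').success);   // false — still rejected
console.log(ProviderSchema.safeParse(undefined).success);   // true — the field is optional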
package/dist/lib/mcp/servers/ai-providers/ai-core-server.js
CHANGED
@@ -38,7 +38,7 @@ export const aiCoreServer = createMCPServer({
   */
  const TextGenerationSchema = z.object({
      prompt: z.string().min(1, 'Prompt is required'),
-     provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai']).optional(),
+     provider: z.enum(['openai', 'bedrock', 'vertex', 'anthropic', 'google-ai', 'azure', 'huggingface', 'ollama', 'mistral']).optional(),
      model: z.string().optional(),
      temperature: z.number().min(0).max(2).optional(),
      maxTokens: z.number().positive().optional(),
@@ -144,12 +144,14 @@ aiCoreServer.registerTool({
      // Use existing provider selection logic
      const availableProviders = getAvailableProviders();
      const selectedProvider = getBestProvider(params.preferred);
-     // Get provider capabilities
+     // Get provider capabilities
      const getProviderCapabilities = (provider) => ({
-         multimodal: provider === 'openai' || provider === 'vertex',
-         streaming: provider === 'openai' || provider === 'anthropic',
-         maxTokens: provider === 'anthropic' ? 100000 : 4000,
-         costEfficient: provider === '
+         multimodal: provider === 'openai' || provider === 'vertex' || provider === 'google-ai',
+         streaming: provider === 'openai' || provider === 'anthropic' || provider === 'azure' || provider === 'mistral',
+         maxTokens: provider === 'anthropic' ? 100000 : provider === 'huggingface' ? 2048 : 4000,
+         costEfficient: provider === 'google-ai' || provider === 'vertex' || provider === 'huggingface' || provider === 'ollama',
+         localExecution: provider === 'ollama',
+         openSource: provider === 'huggingface' || provider === 'ollama'
      });
      const capabilities = getProviderCapabilities(selectedProvider);
      const executionTime = Date.now() - startTime;
@@ -224,9 +226,11 @@ aiCoreServer.registerTool({
      status: isAvailable ? 'available' : 'unavailable',
      capabilities: params.includeCapabilities ? {
          textGeneration: true,
-         multimodal: provider === 'openai' || provider === 'vertex',
-         streaming: provider === 'openai' || provider === 'anthropic',
-         maxTokens: provider === 'anthropic' ? 100000 : 4000
+         multimodal: provider === 'openai' || provider === 'vertex' || provider === 'google-ai',
+         streaming: provider === 'openai' || provider === 'anthropic' || provider === 'azure' || provider === 'mistral',
+         maxTokens: provider === 'anthropic' ? 100000 : provider === 'huggingface' ? 2048 : 4000,
+         localExecution: provider === 'ollama',
+         openSource: provider === 'huggingface' || provider === 'ollama'
      } : undefined,
      lastChecked: new Date().toISOString()
  });
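The reworked capability map is self-contained, so it can be lifted out verbatim to see what the MCP tools now report for the new providers (a standalone sketch; this is not how the registered tool is actually invoked):

// capabilities.mjs — the arrow function added in this diff, extracted for illustration.
const getProviderCapabilities = (provider) => ({
    multimodal: provider === 'openai' || provider === 'vertex' || provider === 'google-ai',
    streaming: provider === 'openai' || provider === 'anthropic' || provider === 'azure' || provider === 'mistral',
    maxTokens: provider === 'anthropic' ? 100000 : provider === 'huggingface' ? 2048 : 4000,
    costEfficient: provider === 'google-ai' || provider === 'vertex' || provider === 'huggingface' || provider === 'ollama',
    localExecution: provider === 'ollama',
    openSource: provider === 'huggingface' || provider === 'ollama'
});

console.log(getProviderCapabilities('ollama'));
// { multimodal: false, streaming: false, maxTokens: 4000,
//   costEfficient: true, localExecution: true, openSource: true }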