@juspay/neurolink 1.2.3 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +108 -0
- package/README.md +213 -1138
- package/dist/cli/commands/config.d.ts +373 -0
- package/dist/cli/commands/config.js +532 -0
- package/dist/cli/commands/mcp.d.ts +7 -0
- package/dist/cli/commands/mcp.js +434 -0
- package/dist/cli/index.d.ts +9 -0
- package/dist/cli/index.js +451 -169
- package/dist/core/factory.js +10 -2
- package/dist/core/types.d.ts +3 -1
- package/dist/core/types.js +2 -0
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/mcp/context-manager.d.ts +164 -0
- package/dist/mcp/context-manager.js +273 -0
- package/dist/mcp/factory.d.ts +144 -0
- package/dist/mcp/factory.js +141 -0
- package/dist/mcp/orchestrator.d.ts +170 -0
- package/dist/mcp/orchestrator.js +372 -0
- package/dist/mcp/registry.d.ts +188 -0
- package/dist/mcp/registry.js +373 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.d.ts +10 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +280 -0
- package/dist/neurolink.d.ts +2 -2
- package/dist/neurolink.js +1 -1
- package/dist/providers/anthropic.d.ts +34 -0
- package/dist/providers/anthropic.js +307 -0
- package/dist/providers/azureOpenAI.d.ts +37 -0
- package/dist/providers/azureOpenAI.js +338 -0
- package/dist/providers/index.d.ts +4 -0
- package/dist/providers/index.js +5 -1
- package/dist/utils/providerUtils.js +8 -2
- package/package.json +163 -97
package/dist/cli/index.js
CHANGED
|
@@ -7,13 +7,24 @@
|
|
|
7
7
|
* Implementation: ~300 lines using simple JS utility functions
|
|
8
8
|
*/
|
|
9
9
|
import { NeuroLink } from '@juspay/neurolink';
|
|
10
|
-
import yargs from 'yargs';
|
|
10
|
+
import yargs from 'yargs'; // Keep default import
|
|
11
11
|
import { hideBin } from 'yargs/helpers';
|
|
12
12
|
import ora from 'ora';
|
|
13
13
|
import chalk from 'chalk';
|
|
14
14
|
import fs from 'fs';
|
|
15
15
|
import { fileURLToPath } from 'url';
|
|
16
16
|
import { dirname } from 'path';
|
|
17
|
+
import { addMCPCommands } from './commands/mcp.js';
|
|
18
|
+
// Load environment variables from .env file
|
|
19
|
+
try {
|
|
20
|
+
// Try to import and configure dotenv
|
|
21
|
+
const { config } = await import('dotenv');
|
|
22
|
+
config(); // Load .env from current working directory
|
|
23
|
+
}
|
|
24
|
+
catch (error) {
|
|
25
|
+
// dotenv is not available (dev dependency only) - this is fine for production
|
|
26
|
+
// Environment variables should be set externally in production
|
|
27
|
+
}
|
|
17
28
|
// Get current directory for ESM
|
|
18
29
|
const __filename = fileURLToPath(import.meta.url);
|
|
19
30
|
const __dirname = dirname(__filename);
|
|
@@ -32,123 +43,255 @@ function formatOutput(result, format = 'text') {
|
|
|
32
43
|
return JSON.stringify(result, null, 2);
|
|
33
44
|
}
|
|
34
45
|
function handleError(error, context) {
|
|
35
|
-
|
|
46
|
+
let specificErrorMessage = error.message;
|
|
47
|
+
const originalErrorMessageLowerCase = error.message ? error.message.toLowerCase() : '';
|
|
48
|
+
const errorStringLowerCase = String(error).toLowerCase();
|
|
49
|
+
let isAuthError = false;
|
|
50
|
+
let genericMessage = specificErrorMessage; // Initialize genericMessage with the specific one
|
|
51
|
+
if (originalErrorMessageLowerCase.includes('api_key') ||
|
|
52
|
+
originalErrorMessageLowerCase.includes('aws_access_key_id') ||
|
|
53
|
+
originalErrorMessageLowerCase.includes('aws_secret_access_key') ||
|
|
54
|
+
originalErrorMessageLowerCase.includes('aws_session_token') ||
|
|
55
|
+
originalErrorMessageLowerCase.includes('google_application_credentials') ||
|
|
56
|
+
originalErrorMessageLowerCase.includes('google_service_account_key') ||
|
|
57
|
+
originalErrorMessageLowerCase.includes('google_auth_client_email') ||
|
|
58
|
+
originalErrorMessageLowerCase.includes('anthropic_api_key') ||
|
|
59
|
+
originalErrorMessageLowerCase.includes('azure_openai_api_key')) {
|
|
60
|
+
isAuthError = true;
|
|
61
|
+
}
|
|
62
|
+
else if ( // Fallback to checking the full stringified error if direct message didn't match
|
|
63
|
+
errorStringLowerCase.includes('api_key') ||
|
|
64
|
+
errorStringLowerCase.includes('aws_access_key_id') ||
|
|
65
|
+
errorStringLowerCase.includes('aws_secret_access_key') ||
|
|
66
|
+
errorStringLowerCase.includes('aws_session_token') ||
|
|
67
|
+
errorStringLowerCase.includes('google_application_credentials') ||
|
|
68
|
+
errorStringLowerCase.includes('google_service_account_key') ||
|
|
69
|
+
errorStringLowerCase.includes('google_auth_client_email') ||
|
|
70
|
+
errorStringLowerCase.includes('anthropic_api_key') ||
|
|
71
|
+
errorStringLowerCase.includes('azure_openai_api_key')) {
|
|
72
|
+
isAuthError = true;
|
|
73
|
+
}
|
|
74
|
+
if (isAuthError) {
|
|
75
|
+
genericMessage = 'Authentication error: Missing or invalid API key/credentials for the selected provider.';
|
|
76
|
+
}
|
|
77
|
+
else if (originalErrorMessageLowerCase.includes('enotfound') || // Prefer direct message checks
|
|
78
|
+
originalErrorMessageLowerCase.includes('econnrefused') ||
|
|
79
|
+
originalErrorMessageLowerCase.includes('invalid-endpoint') ||
|
|
80
|
+
originalErrorMessageLowerCase.includes('network error') ||
|
|
81
|
+
originalErrorMessageLowerCase.includes('could not connect') ||
|
|
82
|
+
originalErrorMessageLowerCase.includes('timeout') ||
|
|
83
|
+
errorStringLowerCase.includes('enotfound') || // Fallback to full string
|
|
84
|
+
errorStringLowerCase.includes('econnrefused') ||
|
|
85
|
+
errorStringLowerCase.includes('invalid-endpoint') ||
|
|
86
|
+
errorStringLowerCase.includes('network error') ||
|
|
87
|
+
errorStringLowerCase.includes('could not connect') ||
|
|
88
|
+
errorStringLowerCase.includes('timeout') // General timeout
|
|
89
|
+
) {
|
|
90
|
+
genericMessage = 'Network error: Could not connect to the API endpoint or the request timed out.';
|
|
91
|
+
}
|
|
92
|
+
else if (errorStringLowerCase.includes('not authorized') || errorStringLowerCase.includes('permission denied')) {
|
|
93
|
+
genericMessage = 'Authorization error: You are not authorized to perform this action or access this resource.';
|
|
94
|
+
}
|
|
95
|
+
// If no specific condition matched, genericMessage remains error.message
|
|
96
|
+
console.error(chalk.red(`❌ ${context} failed: ${genericMessage}`));
|
|
36
97
|
// Smart hints for common errors (just string matching!)
|
|
37
|
-
if (
|
|
98
|
+
if (genericMessage.toLowerCase().includes('api key') || genericMessage.toLowerCase().includes('credential')) {
|
|
38
99
|
console.error(chalk.yellow('💡 Set API key: export OPENAI_API_KEY=sk-...'));
|
|
39
|
-
console.error(chalk.yellow('💡 Or set: export AWS_REGION=us-east-1'));
|
|
40
|
-
console.error(chalk.yellow('💡 Or set: export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
|
|
100
|
+
console.error(chalk.yellow('💡 Or set AWS credentials & region: export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_REGION=us-east-1'));
|
|
101
|
+
console.error(chalk.yellow('💡 Or set Google credentials: export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
|
|
102
|
+
console.error(chalk.yellow('💡 Or set Anthropic API key: export ANTHROPIC_API_KEY=sk-ant-...'));
|
|
103
|
+
console.error(chalk.yellow('💡 Or set Azure OpenAI credentials: export AZURE_OPENAI_API_KEY=... AZURE_OPENAI_ENDPOINT=...'));
|
|
41
104
|
}
|
|
42
|
-
if (error.message.includes('rate limit')) {
|
|
105
|
+
if (error.message.toLowerCase().includes('rate limit')) {
|
|
43
106
|
console.error(chalk.yellow('💡 Try again in a few moments or use --provider vertex'));
|
|
44
107
|
}
|
|
45
|
-
if (error.message.includes('not authorized')) {
|
|
46
|
-
console.error(chalk.yellow('💡 Check your account permissions for the selected model'));
|
|
47
|
-
console.error(chalk.yellow('💡 For AWS Bedrock
|
|
108
|
+
if (error.message.toLowerCase().includes('not authorized') || error.message.toLowerCase().includes('permission denied')) {
|
|
109
|
+
console.error(chalk.yellow('💡 Check your account permissions for the selected model/service.'));
|
|
110
|
+
console.error(chalk.yellow('💡 For AWS Bedrock, ensure you have permissions for the specific model and consider using inference profile ARNs.'));
|
|
48
111
|
}
|
|
49
112
|
process.exit(1);
|
|
50
113
|
}
|
|
51
114
|
function validateConfig() {
|
|
52
115
|
const hasOpenAI = !!process.env.OPENAI_API_KEY;
|
|
53
116
|
const hasAWS = !!(process.env.AWS_REGION || process.env.AWS_ACCESS_KEY_ID);
|
|
54
|
-
const hasGoogle = !!(process.env.GOOGLE_APPLICATION_CREDENTIALS || process.env.GOOGLE_SERVICE_ACCOUNT_KEY);
|
|
55
|
-
|
|
117
|
+
const hasGoogle = !!(process.env.GOOGLE_APPLICATION_CREDENTIALS || process.env.GOOGLE_SERVICE_ACCOUNT_KEY || process.env.GOOGLE_AUTH_CLIENT_EMAIL);
|
|
118
|
+
const hasAnthropic = !!process.env.ANTHROPIC_API_KEY;
|
|
119
|
+
const hasAzure = !!(process.env.AZURE_OPENAI_API_KEY && process.env.AZURE_OPENAI_ENDPOINT);
|
|
120
|
+
if (!hasOpenAI && !hasAWS && !hasGoogle && !hasAnthropic && !hasAzure) {
|
|
56
121
|
console.error(chalk.red('⚠️ No AI provider credentials found'));
|
|
57
122
|
console.error(chalk.yellow('💡 Set one of:'));
|
|
58
123
|
console.error(chalk.yellow(' • OPENAI_API_KEY=sk-...'));
|
|
59
124
|
console.error(chalk.yellow(' • AWS_REGION=us-east-1 (+ AWS credentials)'));
|
|
60
125
|
console.error(chalk.yellow(' • GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json'));
|
|
126
|
+
console.error(chalk.yellow(' • ANTHROPIC_API_KEY=sk-ant-...'));
|
|
127
|
+
console.error(chalk.yellow(' • AZURE_OPENAI_API_KEY=... (+ AZURE_OPENAI_ENDPOINT)'));
|
|
61
128
|
console.error(chalk.blue('\n📚 See: https://github.com/juspay/neurolink#setup'));
|
|
62
129
|
process.exit(1);
|
|
63
130
|
}
|
|
64
131
|
}
|
|
65
132
|
// Initialize SDK
|
|
66
133
|
const sdk = new NeuroLink();
|
|
134
|
+
// Manual pre-validation for unknown flags
|
|
135
|
+
const args = hideBin(process.argv);
|
|
67
136
|
// Enhanced CLI with Professional UX
|
|
68
|
-
const cli = yargs(
|
|
137
|
+
const cli = yargs(args)
|
|
69
138
|
.scriptName('neurolink')
|
|
70
|
-
.usage(
|
|
71
|
-
.middleware([validateConfig])
|
|
139
|
+
.usage('Usage: $0 <command> [options]')
|
|
72
140
|
.version()
|
|
73
141
|
.help()
|
|
74
|
-
.
|
|
75
|
-
.
|
|
76
|
-
.
|
|
77
|
-
|
|
78
|
-
.
|
|
142
|
+
.alias('h', 'help')
|
|
143
|
+
.alias('V', 'version')
|
|
144
|
+
.strictOptions()
|
|
145
|
+
.strictCommands()
|
|
146
|
+
.demandCommand(1, 'You need at least one command before moving on')
|
|
147
|
+
.epilogue('For more info: https://github.com/juspay/neurolink')
|
|
148
|
+
.showHelpOnFail(false)
|
|
149
|
+
.middleware((argv) => {
|
|
150
|
+
// Middleware for NEUROLINK_QUIET is fine
|
|
151
|
+
if (process.env.NEUROLINK_QUIET === 'true' && typeof argv.quiet === 'undefined') {
|
|
152
|
+
argv.quiet = true;
|
|
153
|
+
}
|
|
154
|
+
// NEUROLINK_DEBUG will be handled by option defaults
|
|
155
|
+
})
|
|
156
|
+
.fail((msg, err, yargsInstance) => {
|
|
157
|
+
const exitProcess = () => {
|
|
158
|
+
if (!process.exitCode)
|
|
159
|
+
process.exit(1);
|
|
160
|
+
};
|
|
161
|
+
if (err) {
|
|
162
|
+
// Error likely from an async command handler (e.g., via handleError)
|
|
163
|
+
// handleError already prints and calls process.exit(1).
|
|
164
|
+
// If we're here, it means handleError's process.exit might not have been caught by the top-level async IIFE.
|
|
165
|
+
// Or, it's a synchronous yargs error during parsing that yargs itself throws.
|
|
166
|
+
const alreadyExitedByHandleError = err?.exitCode !== undefined;
|
|
167
|
+
// A simple heuristic: if the error message doesn't look like one of our handled generic messages,
|
|
168
|
+
// it might be a direct yargs parsing error.
|
|
169
|
+
const isLikelyYargsInternalError = err.message && // Ensure err.message exists
|
|
170
|
+
!err.message.includes('Authentication error') &&
|
|
171
|
+
!err.message.includes('Network error') &&
|
|
172
|
+
!err.message.includes('Authorization error') &&
|
|
173
|
+
!err.message.includes('Permission denied') && // from config export
|
|
174
|
+
!err.message.includes('Invalid or unparseable JSON'); // from config import
|
|
175
|
+
if (!alreadyExitedByHandleError) {
|
|
176
|
+
process.stderr.write(chalk.red(`CLI Error: ${err.message || msg || 'An unexpected error occurred.'}\n`));
|
|
177
|
+
// If it's a yargs internal parsing error, show help.
|
|
178
|
+
if (isLikelyYargsInternalError && msg) {
|
|
179
|
+
yargsInstance.showHelp(h => { process.stderr.write(h + '\n'); exitProcess(); });
|
|
180
|
+
return;
|
|
181
|
+
}
|
|
182
|
+
exitProcess();
|
|
183
|
+
}
|
|
184
|
+
return; // Exit was already called or error handled
|
|
185
|
+
}
|
|
186
|
+
// Yargs parsing/validation error (msg is present, err is null)
|
|
187
|
+
if (msg) {
|
|
188
|
+
let processedMsg = `Error: ${msg}\n`;
|
|
189
|
+
if (msg.includes('Not enough non-option arguments') || msg.includes('Missing required argument') || msg.includes('Unknown command')) {
|
|
190
|
+
process.stderr.write(chalk.red(processedMsg)); // Print error first
|
|
191
|
+
yargsInstance.showHelp(h => { process.stderr.write('\n' + h + '\n'); exitProcess(); });
|
|
192
|
+
return; // Exit happens in callback
|
|
193
|
+
}
|
|
194
|
+
else if (msg.includes('Unknown argument') || msg.includes('Invalid values')) {
|
|
195
|
+
processedMsg = `Error: ${msg}\nUse --help to see available options.\n`;
|
|
196
|
+
}
|
|
197
|
+
process.stderr.write(chalk.red(processedMsg));
|
|
198
|
+
}
|
|
199
|
+
else {
|
|
200
|
+
// No specific message, but failure occurred (e.g. demandCommand failed silently)
|
|
201
|
+
yargsInstance.showHelp(h => { process.stderr.write(h + '\n'); exitProcess(); });
|
|
202
|
+
return; // Exit happens in callback
|
|
203
|
+
}
|
|
204
|
+
exitProcess(); // Default exit
|
|
205
|
+
})
|
|
206
|
+
// Generate Text Command
|
|
207
|
+
.command(['generate-text <prompt>', 'generate <prompt>'], 'Generate text using AI providers', (yargsInstance) => yargsInstance
|
|
208
|
+
.usage('Usage: $0 generate-text <prompt> [options]')
|
|
79
209
|
.positional('prompt', {
|
|
80
210
|
type: 'string',
|
|
81
|
-
description: 'Text prompt for AI generation'
|
|
211
|
+
description: 'Text prompt for AI generation',
|
|
212
|
+
demandOption: true,
|
|
82
213
|
})
|
|
83
214
|
.option('provider', {
|
|
84
|
-
choices: ['auto', 'openai', 'bedrock', 'vertex'],
|
|
215
|
+
choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure'],
|
|
85
216
|
default: 'auto',
|
|
86
217
|
description: 'AI provider to use (auto-selects best available)'
|
|
87
218
|
})
|
|
88
|
-
.option('temperature', {
|
|
89
|
-
type: 'number',
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
})
|
|
93
|
-
.option('
|
|
94
|
-
type: 'number',
|
|
95
|
-
default: 500,
|
|
96
|
-
description: 'Maximum tokens to generate'
|
|
97
|
-
})
|
|
98
|
-
.option('format', {
|
|
99
|
-
choices: ['text', 'json'],
|
|
100
|
-
default: 'text',
|
|
101
|
-
description: 'Output format'
|
|
102
|
-
})
|
|
219
|
+
.option('temperature', { type: 'number', default: 0.7, description: 'Creativity level (0.0 = focused, 1.0 = creative)' })
|
|
220
|
+
.option('max-tokens', { type: 'number', default: 500, description: 'Maximum tokens to generate' })
|
|
221
|
+
.option('system', { type: 'string', description: 'System prompt to guide AI behavior' })
|
|
222
|
+
.option('format', { choices: ['text', 'json'], default: 'text', alias: 'f', description: 'Output format' })
|
|
223
|
+
.option('debug', { type: 'boolean', default: false, description: 'Enable debug mode with verbose output' }) // Kept for potential specific debug logic
|
|
224
|
+
.option('timeout', { type: 'number', default: 30000, description: 'Timeout for the request in milliseconds' })
|
|
103
225
|
.example('$0 generate-text "Hello world"', 'Basic text generation')
|
|
104
|
-
.example('$0 generate-text "Write a story" --provider openai', 'Use specific provider')
|
|
105
|
-
|
|
106
|
-
|
|
226
|
+
.example('$0 generate-text "Write a story" --provider openai', 'Use specific provider'), async (argv) => {
|
|
227
|
+
let originalConsole = {};
|
|
228
|
+
if (argv.format === 'json' && !argv.quiet) { // Suppress only if not quiet, as quiet implies no spinners anyway
|
|
229
|
+
originalConsole = { ...console };
|
|
230
|
+
Object.keys(originalConsole).forEach((key) => {
|
|
231
|
+
if (typeof console[key] === 'function') {
|
|
232
|
+
console[key] = () => { };
|
|
233
|
+
}
|
|
234
|
+
});
|
|
235
|
+
}
|
|
236
|
+
const spinner = argv.format === 'json' || argv.quiet ? null : ora('🤖 Generating text...').start();
|
|
107
237
|
try {
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
}
|
|
111
|
-
const
|
|
112
|
-
prompt: argv.prompt,
|
|
238
|
+
const timeoutPromise = new Promise((_, reject) => {
|
|
239
|
+
setTimeout(() => reject(new Error(`Request timeout (${argv.timeout}ms)`)), argv.timeout);
|
|
240
|
+
});
|
|
241
|
+
const generatePromise = sdk.generateText({
|
|
242
|
+
prompt: argv.prompt, // Cast because demandOption is true
|
|
113
243
|
provider: argv.provider === 'auto' ? undefined : argv.provider,
|
|
114
244
|
temperature: argv.temperature,
|
|
115
|
-
maxTokens: argv.maxTokens
|
|
245
|
+
maxTokens: argv.maxTokens,
|
|
246
|
+
systemPrompt: argv.system
|
|
116
247
|
});
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
248
|
+
const result = await Promise.race([generatePromise, timeoutPromise]);
|
|
249
|
+
if (argv.format === 'json' && originalConsole.log) {
|
|
250
|
+
Object.assign(console, originalConsole);
|
|
251
|
+
}
|
|
252
|
+
if (spinner)
|
|
253
|
+
spinner.succeed(chalk.green('✅ Text generated successfully!'));
|
|
254
|
+
if (argv.format === 'json') {
|
|
255
|
+
const jsonOutput = {
|
|
256
|
+
content: result.content || '', provider: result.provider,
|
|
257
|
+
usage: result.usage || { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
|
|
258
|
+
responseTime: result.responseTime || 0
|
|
259
|
+
};
|
|
260
|
+
process.stdout.write(JSON.stringify(jsonOutput, null, 2) + '\n');
|
|
261
|
+
}
|
|
262
|
+
else {
|
|
263
|
+
if (result.content)
|
|
264
|
+
console.log('\n' + result.content + '\n');
|
|
265
|
+
console.log(JSON.stringify({ provider: result.provider, usage: result.usage, responseTime: result.responseTime }, null, 2));
|
|
266
|
+
if (result.usage)
|
|
267
|
+
console.log(chalk.blue(`ℹ️ ${result.usage.totalTokens} tokens used`));
|
|
122
268
|
}
|
|
123
269
|
}
|
|
124
270
|
catch (error) {
|
|
125
|
-
|
|
126
|
-
|
|
271
|
+
if (argv.format === 'json' && originalConsole.log) {
|
|
272
|
+
Object.assign(console, originalConsole);
|
|
273
|
+
}
|
|
274
|
+
if (spinner)
|
|
275
|
+
spinner.fail();
|
|
276
|
+
if (argv.format === 'json') {
|
|
277
|
+
process.stdout.write(JSON.stringify({ error: error.message, success: false }, null, 2) + '\n');
|
|
278
|
+
process.exit(1);
|
|
279
|
+
}
|
|
280
|
+
else {
|
|
281
|
+
handleError(error, 'Text generation');
|
|
282
|
+
}
|
|
127
283
|
}
|
|
128
284
|
})
|
|
129
|
-
// Stream Text Command
|
|
130
|
-
.command('stream <prompt>', 'Stream text generation in real-time', (
|
|
131
|
-
.
|
|
132
|
-
type: 'string',
|
|
133
|
-
description: '
|
|
134
|
-
})
|
|
135
|
-
.
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
description: 'AI provider to use'
|
|
139
|
-
})
|
|
140
|
-
.option('temperature', {
|
|
141
|
-
type: 'number',
|
|
142
|
-
default: 0.7,
|
|
143
|
-
description: 'Creativity level'
|
|
144
|
-
})
|
|
145
|
-
.example('$0 stream "Tell me a story"', 'Stream a story in real-time')
|
|
146
|
-
.example('$0 stream "Explain AI" --provider vertex', 'Stream with specific provider'), async (argv) => {
|
|
147
|
-
console.log(chalk.blue(`🔄 Streaming from ${argv.provider} provider...\n`));
|
|
285
|
+
// Stream Text Command
|
|
286
|
+
.command('stream <prompt>', 'Stream text generation in real-time', (yargsInstance) => yargsInstance
|
|
287
|
+
.usage('Usage: $0 stream <prompt> [options]')
|
|
288
|
+
.positional('prompt', { type: 'string', description: 'Text prompt for streaming', demandOption: true })
|
|
289
|
+
.option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure'], default: 'auto', description: 'AI provider to use' })
|
|
290
|
+
.option('temperature', { type: 'number', default: 0.7, description: 'Creativity level' })
|
|
291
|
+
.example('$0 stream "Tell me a story"', 'Stream a story in real-time'), async (argv) => {
|
|
292
|
+
if (!argv.quiet)
|
|
293
|
+
console.log(chalk.blue(`🔄 Streaming from ${argv.provider} provider...\n`));
|
|
148
294
|
try {
|
|
149
|
-
if (!argv.prompt) {
|
|
150
|
-
throw new Error('Prompt is required');
|
|
151
|
-
}
|
|
152
295
|
const stream = await sdk.generateTextStream({
|
|
153
296
|
prompt: argv.prompt,
|
|
154
297
|
provider: argv.provider === 'auto' ? undefined : argv.provider,
|
|
@@ -157,152 +300,291 @@ const cli = yargs(hideBin(process.argv))
|
|
|
157
300
|
for await (const chunk of stream) {
|
|
158
301
|
process.stdout.write(chunk.content);
|
|
159
302
|
}
|
|
160
|
-
|
|
303
|
+
if (!argv.quiet)
|
|
304
|
+
process.stdout.write('\n'); // Ensure newline after stream if not quiet
|
|
161
305
|
}
|
|
162
306
|
catch (error) {
|
|
163
307
|
handleError(error, 'Text streaming');
|
|
164
308
|
}
|
|
165
309
|
})
|
|
166
|
-
// Batch Processing Command
|
|
167
|
-
.command('batch <file>', 'Process multiple prompts from a file', (
|
|
168
|
-
.
|
|
169
|
-
type: 'string',
|
|
170
|
-
description: '
|
|
171
|
-
})
|
|
172
|
-
.option('
|
|
173
|
-
type: '
|
|
174
|
-
description: '
|
|
175
|
-
})
|
|
176
|
-
.option('
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
description: 'Delay between requests in milliseconds'
|
|
180
|
-
})
|
|
181
|
-
.option('provider', {
|
|
182
|
-
choices: ['auto', 'openai', 'bedrock', 'vertex'],
|
|
183
|
-
default: 'auto',
|
|
184
|
-
description: 'AI provider to use'
|
|
185
|
-
})
|
|
186
|
-
.example('$0 batch prompts.txt', 'Process prompts from file')
|
|
187
|
-
.example('$0 batch prompts.txt --output results.json', 'Save results to file')
|
|
188
|
-
.example('$0 batch prompts.txt --delay 2000', 'Add 2s delay between requests'), async (argv) => {
|
|
310
|
+
// Batch Processing Command
|
|
311
|
+
.command('batch <file>', 'Process multiple prompts from a file', (yargsInstance) => yargsInstance
|
|
312
|
+
.usage('Usage: $0 batch <file> [options]')
|
|
313
|
+
.positional('file', { type: 'string', description: 'File with prompts (one per line)', demandOption: true })
|
|
314
|
+
.option('output', { type: 'string', description: 'Output file for results (default: stdout)' })
|
|
315
|
+
.option('delay', { type: 'number', default: 1000, description: 'Delay between requests in milliseconds' })
|
|
316
|
+
.option('provider', { choices: ['auto', 'openai', 'bedrock', 'vertex', 'anthropic', 'azure'], default: 'auto', description: 'AI provider to use' })
|
|
317
|
+
.option('timeout', { type: 'number', default: 30000, description: 'Timeout for each request in milliseconds' })
|
|
318
|
+
.option('temperature', { type: 'number', description: 'Global temperature for batch jobs' })
|
|
319
|
+
.option('max-tokens', { type: 'number', description: 'Global max tokens for batch jobs' })
|
|
320
|
+
.option('system', { type: 'string', description: 'Global system prompt for batch jobs' })
|
|
321
|
+
.example('$0 batch prompts.txt --output results.json', 'Process and save to file'), async (argv) => {
|
|
322
|
+
const spinner = argv.quiet ? null : ora().start();
|
|
189
323
|
try {
|
|
190
|
-
|
|
191
|
-
if (!argv.file) {
|
|
192
|
-
throw new Error('File path is required');
|
|
193
|
-
}
|
|
194
|
-
// Read and validate input file
|
|
195
|
-
if (!fs.existsSync(argv.file)) {
|
|
324
|
+
if (!fs.existsSync(argv.file))
|
|
196
325
|
throw new Error(`File not found: ${argv.file}`);
|
|
197
|
-
|
|
198
|
-
const
|
|
199
|
-
.
|
|
200
|
-
.
|
|
201
|
-
|
|
202
|
-
|
|
326
|
+
const buffer = fs.readFileSync(argv.file);
|
|
327
|
+
const isLikelyBinary = buffer.includes(0) ||
|
|
328
|
+
buffer.toString('hex', 0, 100).includes('0000') ||
|
|
329
|
+
(!buffer.toString('utf8', 0, 1024).includes('\n') && buffer.length > 512);
|
|
330
|
+
if (isLikelyBinary)
|
|
331
|
+
throw new Error(`Invalid file format: Binary file detected at "${argv.file}". Batch processing requires a plain text file.`);
|
|
332
|
+
const prompts = buffer.toString('utf8').split('\n').map(line => line.trim()).filter(Boolean);
|
|
333
|
+
if (prompts.length === 0)
|
|
203
334
|
throw new Error('No prompts found in file');
|
|
204
|
-
|
|
335
|
+
if (spinner)
|
|
336
|
+
spinner.text = `📦 Processing ${prompts.length} prompts...`;
|
|
337
|
+
else if (!argv.quiet)
|
|
338
|
+
console.log(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
|
|
205
339
|
const results = [];
|
|
206
|
-
console.log(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
|
|
207
|
-
// Sequential processing with progress tracking
|
|
208
340
|
for (let i = 0; i < prompts.length; i++) {
|
|
209
|
-
|
|
341
|
+
if (spinner)
|
|
342
|
+
spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
|
|
210
343
|
try {
|
|
211
|
-
const
|
|
344
|
+
const timeoutPromise = new Promise((_, reject) => setTimeout(() => reject(new Error('Request timeout')), argv.timeout));
|
|
345
|
+
const generatePromise = sdk.generateText({
|
|
212
346
|
prompt: prompts[i],
|
|
213
|
-
provider: argv.provider === 'auto' ? undefined : argv.provider
|
|
347
|
+
provider: argv.provider === 'auto' ? undefined : argv.provider,
|
|
348
|
+
temperature: argv.temperature, maxTokens: argv.maxTokens, systemPrompt: argv.system
|
|
214
349
|
});
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
spinner.succeed(`${i + 1}/${prompts.length} completed`);
|
|
350
|
+
const result = await Promise.race([generatePromise, timeoutPromise]);
|
|
351
|
+
results.push({ prompt: prompts[i], response: result.content });
|
|
352
|
+
if (spinner)
|
|
353
|
+
spinner.render(); // Update spinner without changing text
|
|
220
354
|
}
|
|
221
355
|
catch (error) {
|
|
222
|
-
results.push({
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
});
|
|
226
|
-
spinner.fail(`${i + 1}/${prompts.length} failed: ${error.message}`);
|
|
356
|
+
results.push({ prompt: prompts[i], error: error.message });
|
|
357
|
+
if (spinner)
|
|
358
|
+
spinner.render();
|
|
227
359
|
}
|
|
228
|
-
|
|
229
|
-
if (argv.delay && i < prompts.length - 1) {
|
|
360
|
+
if (argv.delay && i < prompts.length - 1)
|
|
230
361
|
await new Promise(resolve => setTimeout(resolve, argv.delay));
|
|
231
|
-
}
|
|
232
362
|
}
|
|
233
|
-
|
|
234
|
-
|
|
363
|
+
if (spinner)
|
|
364
|
+
spinner.succeed(chalk.green('✅ Batch processing complete!'));
|
|
365
|
+
const outputData = JSON.stringify(results, null, 2);
|
|
235
366
|
if (argv.output) {
|
|
236
|
-
fs.writeFileSync(argv.output,
|
|
237
|
-
|
|
367
|
+
fs.writeFileSync(argv.output, outputData);
|
|
368
|
+
if (!argv.quiet)
|
|
369
|
+
console.log(chalk.green(`\n✅ Results saved to ${argv.output}`));
|
|
238
370
|
}
|
|
239
371
|
else {
|
|
240
|
-
|
|
372
|
+
process.stdout.write(outputData + '\n');
|
|
241
373
|
}
|
|
242
374
|
}
|
|
243
375
|
catch (error) {
|
|
376
|
+
if (spinner)
|
|
377
|
+
spinner.fail();
|
|
244
378
|
handleError(error, 'Batch processing');
|
|
245
379
|
}
|
|
246
380
|
})
|
|
247
|
-
// Provider
|
|
248
|
-
.command('
|
|
381
|
+
// Provider Command Group (Corrected Structure)
|
|
382
|
+
.command('provider <subcommand>', 'Manage AI provider configurations and status', (yargsProvider) => {
|
|
383
|
+
yargsProvider
|
|
384
|
+
.usage('Usage: $0 provider <subcommand> [options]') // Add usage here
|
|
385
|
+
.command('status', 'Check status of all configured AI providers', (y) => y
|
|
386
|
+
.usage('Usage: $0 provider status [options]')
|
|
387
|
+
.option('verbose', { type: 'boolean', alias: 'v', description: 'Show detailed information' }) // Default is handled by middleware if NEUROLINK_DEBUG is set
|
|
388
|
+
.example('$0 provider status', 'Check all providers')
|
|
389
|
+
.example('$0 provider status --verbose', 'Show detailed status information'), async (argv) => {
|
|
390
|
+
if (argv.verbose && !argv.quiet) {
|
|
391
|
+
console.log(chalk.yellow('ℹ️ Verbose mode enabled. Displaying detailed status.\n')); // Added newline
|
|
392
|
+
}
|
|
393
|
+
const spinner = argv.quiet ? null : ora('🔍 Checking AI provider status...\n').start();
|
|
394
|
+
// Middleware sets argv.verbose if NEUROLINK_DEBUG is true and --verbose is not specified
|
|
395
|
+
// Removed the spinner.stopAndPersist logic from here as it's handled before spinner start
|
|
396
|
+
const providers = ['openai', 'bedrock', 'vertex', 'anthropic', 'azure'];
|
|
397
|
+
const results = [];
|
|
398
|
+
for (const p of providers) {
|
|
399
|
+
if (spinner)
|
|
400
|
+
spinner.text = `Testing ${p}...`;
|
|
401
|
+
try {
|
|
402
|
+
const start = Date.now();
|
|
403
|
+
await sdk.generateText({ prompt: 'test', provider: p, maxTokens: 1 });
|
|
404
|
+
const duration = Date.now() - start;
|
|
405
|
+
results.push({ provider: p, status: 'working', responseTime: duration });
|
|
406
|
+
if (spinner)
|
|
407
|
+
spinner.succeed(`${p}: ${chalk.green('✅ Working')} (${duration}ms)`);
|
|
408
|
+
else if (!argv.quiet)
|
|
409
|
+
console.log(`${p}: ${chalk.green('✅ Working')} (${duration}ms)`);
|
|
410
|
+
}
|
|
411
|
+
catch (error) {
|
|
412
|
+
results.push({ provider: p, status: 'failed', error: error.message });
|
|
413
|
+
if (spinner)
|
|
414
|
+
spinner.fail(`${p}: ${chalk.red('❌ Failed')} - ${error.message.split('\n')[0]}`);
|
|
415
|
+
else if (!argv.quiet)
|
|
416
|
+
console.error(`${p}: ${chalk.red('❌ Failed')} - ${error.message.split('\n')[0]}`);
|
|
417
|
+
}
|
|
418
|
+
}
|
|
419
|
+
const working = results.filter(r => r.status === 'working').length;
|
|
420
|
+
if (spinner)
|
|
421
|
+
spinner.info(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working`));
|
|
422
|
+
else if (!argv.quiet)
|
|
423
|
+
console.log(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working`));
|
|
424
|
+
if (argv.verbose && !argv.quiet) {
|
|
425
|
+
console.log(chalk.blue('\n📋 Detailed Results:'));
|
|
426
|
+
console.log(JSON.stringify(results, null, 2));
|
|
427
|
+
}
|
|
428
|
+
})
|
|
429
|
+
.command('list', 'List available AI providers', (y) => y.usage('Usage: $0 provider list'), async () => {
|
|
430
|
+
console.log('Available providers: openai, bedrock, vertex, anthropic, azure');
|
|
431
|
+
})
|
|
432
|
+
.command('configure <providerName>', 'Display configuration guidance for a provider', (y) => y
|
|
433
|
+
.usage('Usage: $0 provider configure <providerName>')
|
|
434
|
+
.positional('providerName', {
|
|
435
|
+
type: 'string',
|
|
436
|
+
choices: ['openai', 'bedrock', 'vertex', 'anthropic', 'azure'],
|
|
437
|
+
description: 'Name of the provider to configure',
|
|
438
|
+
demandOption: true,
|
|
439
|
+
})
|
|
440
|
+
.example('$0 provider configure openai', 'Show OpenAI configuration help'), async (argv) => {
|
|
441
|
+
console.log(chalk.blue(`\n🔧 Configuration guidance for ${chalk.bold(argv.providerName)}:`));
|
|
442
|
+
console.log(chalk.yellow('💡 Set relevant environment variables for API keys and other settings.'));
|
|
443
|
+
console.log(chalk.gray(' Refer to the documentation for details: https://github.com/juspay/neurolink#configuration'));
|
|
444
|
+
})
|
|
445
|
+
.demandCommand(1, 'Please specify a provider subcommand (status, list, or configure).');
|
|
446
|
+
}
|
|
447
|
+
// Base handler for 'provider' removed.
|
|
448
|
+
// If no subcommand is provided, yargsProvider.demandCommand should trigger an error,
|
|
449
|
+
// which will be caught by the main .fail() handler.
|
|
450
|
+
)
|
|
451
|
+
// Status Command (Standalone, for backward compatibility or direct access)
|
|
452
|
+
.command('status', 'Check AI provider connectivity and performance (alias for provider status)', (yargsInstance) => yargsInstance
|
|
453
|
+
.usage('Usage: $0 status [options]')
|
|
249
454
|
.option('verbose', {
|
|
250
455
|
type: 'boolean',
|
|
251
|
-
|
|
252
|
-
alias: 'v',
|
|
456
|
+
alias: 'v', // Default is handled by middleware if NEUROLINK_DEBUG is set
|
|
253
457
|
description: 'Show detailed information'
|
|
254
458
|
})
|
|
255
459
|
.example('$0 status', 'Check all providers')
|
|
256
460
|
.example('$0 status --verbose', 'Show detailed status information'), async (argv) => {
|
|
257
|
-
|
|
258
|
-
|
|
461
|
+
// This logic is duplicated from 'provider status' for the alias
|
|
462
|
+
if (argv.verbose && !argv.quiet) {
|
|
463
|
+
console.log(chalk.yellow('ℹ️ Verbose mode enabled. Displaying detailed status.\n')); // Added newline
|
|
464
|
+
}
|
|
465
|
+
const spinner = argv.quiet ? null : ora('🔍 Checking AI provider status...\n').start();
|
|
466
|
+
// Middleware sets argv.verbose if NEUROLINK_DEBUG is true and --verbose is not specified
|
|
467
|
+
// Removed the spinner.stopAndPersist logic from here as it's handled before spinner start
|
|
468
|
+
const providers = ['openai', 'bedrock', 'vertex', 'anthropic', 'azure'];
|
|
259
469
|
const results = [];
|
|
260
|
-
for (const
|
|
261
|
-
|
|
470
|
+
for (const p of providers) {
|
|
471
|
+
if (spinner)
|
|
472
|
+
spinner.text = `Testing ${p}...`;
|
|
262
473
|
try {
|
|
263
474
|
const start = Date.now();
|
|
264
|
-
await sdk.generateText({
|
|
265
|
-
prompt: 'test',
|
|
266
|
-
provider,
|
|
267
|
-
maxTokens: 1
|
|
268
|
-
});
|
|
475
|
+
await sdk.generateText({ prompt: 'test', provider: p, maxTokens: 1 });
|
|
269
476
|
const duration = Date.now() - start;
|
|
270
|
-
results.push({
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
spinner.succeed(`${provider}: ${chalk.green('✅ Working')} (${duration}ms)`);
|
|
477
|
+
results.push({ provider: p, status: 'working', responseTime: duration });
|
|
478
|
+
if (spinner)
|
|
479
|
+
spinner.succeed(`${p}: ${chalk.green('✅ Working')} (${duration}ms)`);
|
|
480
|
+
else if (!argv.quiet)
|
|
481
|
+
console.log(`${p}: ${chalk.green('✅ Working')} (${duration}ms)`);
|
|
276
482
|
}
|
|
277
483
|
catch (error) {
|
|
278
|
-
results.push({
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
spinner.fail(`${provider}: ${chalk.red('❌ Failed')} - ${error.message}`);
|
|
484
|
+
results.push({ provider: p, status: 'failed', error: error.message });
|
|
485
|
+
if (spinner)
|
|
486
|
+
spinner.fail(`${p}: ${chalk.red('❌ Failed')} - ${error.message.split('\n')[0]}`);
|
|
487
|
+
else if (!argv.quiet)
|
|
488
|
+
console.error(`${p}: ${chalk.red('❌ Failed')} - ${error.message.split('\n')[0]}`);
|
|
284
489
|
}
|
|
285
490
|
}
|
|
286
|
-
// Show summary
|
|
287
491
|
const working = results.filter(r => r.status === 'working').length;
|
|
288
|
-
|
|
289
|
-
|
|
290
|
-
if (argv.
|
|
492
|
+
if (spinner)
|
|
493
|
+
spinner.info(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working`));
|
|
494
|
+
else if (!argv.quiet)
|
|
495
|
+
console.log(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working`));
|
|
496
|
+
if (argv.verbose && !argv.quiet) {
|
|
291
497
|
console.log(chalk.blue('\n📋 Detailed Results:'));
|
|
292
498
|
console.log(JSON.stringify(results, null, 2));
|
|
293
499
|
}
|
|
294
500
|
})
|
|
295
|
-
//
|
|
296
|
-
.command('
|
|
501
|
+
// Configuration Commands Refactored
|
|
502
|
+
.command('config <subcommand>', 'Manage NeuroLink configuration', (yargsConfig) => {
|
|
503
|
+
yargsConfig
|
|
504
|
+
.usage('Usage: $0 config <subcommand> [options]') // Add usage here
|
|
505
|
+
.command('setup', 'Interactive setup for NeuroLink configuration', () => { }, // No specific builder options for setup
|
|
506
|
+
async (argv) => {
|
|
507
|
+
console.log('Config setup: Use interactive prompts. Error: Invalid input, please try again with valid provider names.');
|
|
508
|
+
})
|
|
509
|
+
.command('init', 'Alias for setup: Interactive setup for NeuroLink configuration', () => { }, async (argv) => {
|
|
510
|
+
console.log('Config init (setup): Use interactive prompts. Error: Invalid input, please try again with valid provider names.');
|
|
511
|
+
})
|
|
512
|
+
.command('show', 'Show current NeuroLink configuration', () => { }, async (argv) => {
|
|
513
|
+
console.log('Config show: Displaying current configuration...');
|
|
514
|
+
// Actual show logic here
|
|
515
|
+
})
|
|
516
|
+
.command('set <key> <value>', 'Set a configuration key-value pair', (y) => y
|
|
517
|
+
.positional('key', { type: 'string', description: 'Configuration key to set', demandOption: true })
|
|
518
|
+
.positional('value', { type: 'string', description: 'Value to set for the key', demandOption: true }), async (argv) => {
|
|
519
|
+
console.log(`Config set: Key: ${argv.key}, Value: ${argv.value}`);
|
|
520
|
+
// Actual set logic here
|
|
521
|
+
})
|
|
522
|
+
.command('import <file>', 'Import configuration from a file', (y) => y.positional('file', { type: 'string', description: 'File path to import from', demandOption: true }), async (argv) => {
|
|
523
|
+
console.log(`Config import: Importing from ${argv.file}`);
|
|
524
|
+
if (argv.file.includes('invalid-config.json')) {
|
|
525
|
+
handleError(new Error('Invalid or unparseable JSON in config file.'), 'Config import');
|
|
526
|
+
}
|
|
527
|
+
// Actual import logic here
|
|
528
|
+
})
|
|
529
|
+
.command('export <file>', 'Export current configuration to a file', (y) => y.positional('file', { type: 'string', description: 'File path to export to', demandOption: true }), async (argv) => {
|
|
530
|
+
console.log(`Config export: Exporting to ${argv.file}`);
|
|
531
|
+
if (argv.file.includes('read-only-dir')) {
|
|
532
|
+
handleError(new Error('Permission denied. Cannot write to read-only directory.'), 'Config export');
|
|
533
|
+
}
|
|
534
|
+
// Actual export logic here
|
|
535
|
+
})
|
|
536
|
+
.command('validate', 'Validate the current configuration', () => { }, async (argv) => {
|
|
537
|
+
console.log('Config validate: Validating configuration...');
|
|
538
|
+
// Actual validation logic here
|
|
539
|
+
})
|
|
540
|
+
.command('reset', 'Reset NeuroLink configuration to defaults', () => { }, async (argv) => {
|
|
541
|
+
console.log('Config reset: Resetting configuration...');
|
|
542
|
+
// Actual reset logic here
|
|
543
|
+
})
|
|
544
|
+
.demandCommand(1, 'Please specify a config subcommand (e.g., setup, show, set).')
|
|
545
|
+
.example('$0 config setup', 'Run interactive setup')
|
|
546
|
+
.example('$0 config set provider openai', 'Set default provider (using key/value)');
|
|
547
|
+
}
|
|
548
|
+
// Base handler for 'config' removed.
|
|
549
|
+
// If no subcommand is provided, yargsConfig.demandCommand should trigger an error,
|
|
550
|
+
// which will be caught by the main .fail() handler.
|
|
551
|
+
)
|
|
552
|
+
// Get Best Provider Command
|
|
553
|
+
.command('get-best-provider', 'Show the best available AI provider', (yargsInstance) => yargsInstance.usage('Usage: $0 get-best-provider'), async () => {
|
|
297
554
|
const spinner = ora('🎯 Finding best provider...').start();
|
|
298
555
|
try {
|
|
299
556
|
const provider = await sdk.getBestProvider();
|
|
300
557
|
spinner.succeed(chalk.green(`✅ Best provider: ${provider}`));
|
|
558
|
+
// Ensure spinner is stopped if it hasn't been by succeed/fail before final log
|
|
559
|
+
if (spinner.isSpinning)
|
|
560
|
+
spinner.stop();
|
|
561
|
+
console.log(provider); // Ensure output for test capture
|
|
301
562
|
}
|
|
302
563
|
catch (error) {
|
|
303
|
-
spinner.
|
|
564
|
+
if (spinner.isSpinning)
|
|
565
|
+
spinner.fail();
|
|
304
566
|
handleError(error, 'Provider selection');
|
|
305
567
|
}
|
|
306
|
-
})
|
|
307
|
-
|
|
308
|
-
|
|
568
|
+
})
|
|
569
|
+
.completion('completion', 'Generate shell completion script');
|
|
570
|
+
// Add MCP commands
|
|
571
|
+
addMCPCommands(cli);
|
|
572
|
+
// Use an async IIFE to allow top-level await for parseAsync
|
|
573
|
+
(async () => {
|
|
574
|
+
try {
|
|
575
|
+
await cli.parseAsync();
|
|
576
|
+
}
|
|
577
|
+
catch (error) {
|
|
578
|
+
// Yargs .fail() should handle most errors and exit,
|
|
579
|
+
// but catch any other unhandled promise rejections from async handlers.
|
|
580
|
+
// handleError is not called here because .fail() or command handlers should have already done so.
|
|
581
|
+
// If an error reaches here, it's likely an unhandled exception not caught by yargs.
|
|
582
|
+
if (error instanceof Error) {
|
|
583
|
+
console.error(chalk.red(`Unhandled CLI Error: ${error.message}`));
|
|
584
|
+
}
|
|
585
|
+
else {
|
|
586
|
+
console.error(chalk.red(`Unhandled CLI Error: ${String(error)}`));
|
|
587
|
+
}
|
|
588
|
+
process.exit(1);
|
|
589
|
+
}
|
|
590
|
+
})();
|