@juspay/neurolink 7.0.0 → 7.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/CHANGELOG.md +15 -4
  2. package/README.md +16 -11
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/cli/commands/config.js +22 -21
  5. package/dist/cli/commands/mcp.d.ts +79 -0
  6. package/dist/cli/commands/mcp.js +916 -0
  7. package/dist/cli/commands/models.d.ts +63 -0
  8. package/dist/cli/commands/models.js +653 -0
  9. package/dist/cli/commands/ollama.js +56 -55
  10. package/dist/cli/factories/commandFactory.d.ts +67 -2
  11. package/dist/cli/factories/commandFactory.js +840 -92
  12. package/dist/cli/index.d.ts +6 -0
  13. package/dist/cli/index.js +42 -999
  14. package/dist/cli/utils/completeSetup.js +9 -8
  15. package/dist/cli/utils/envManager.js +7 -6
  16. package/dist/cli/utils/interactiveSetup.js +20 -19
  17. package/dist/core/analytics.js +25 -38
  18. package/dist/core/baseProvider.d.ts +8 -0
  19. package/dist/core/baseProvider.js +177 -68
  20. package/dist/core/constants.d.ts +11 -0
  21. package/dist/core/constants.js +17 -0
  22. package/dist/core/evaluation.js +25 -14
  23. package/dist/core/factory.js +21 -18
  24. package/dist/core/streamAnalytics.d.ts +65 -0
  25. package/dist/core/streamAnalytics.js +125 -0
  26. package/dist/factories/providerRegistry.js +3 -1
  27. package/dist/lib/core/analytics.js +25 -38
  28. package/dist/lib/core/baseProvider.d.ts +8 -0
  29. package/dist/lib/core/baseProvider.js +177 -68
  30. package/dist/lib/core/constants.d.ts +11 -0
  31. package/dist/lib/core/constants.js +17 -0
  32. package/dist/lib/core/evaluation.js +25 -14
  33. package/dist/lib/core/factory.js +22 -18
  34. package/dist/lib/core/streamAnalytics.d.ts +65 -0
  35. package/dist/lib/core/streamAnalytics.js +125 -0
  36. package/dist/lib/factories/providerRegistry.js +3 -1
  37. package/dist/lib/mcp/toolRegistry.d.ts +5 -0
  38. package/dist/lib/mcp/toolRegistry.js +60 -0
  39. package/dist/lib/models/modelRegistry.d.ts +132 -0
  40. package/dist/lib/models/modelRegistry.js +483 -0
  41. package/dist/lib/models/modelResolver.d.ts +115 -0
  42. package/dist/lib/models/modelResolver.js +467 -0
  43. package/dist/lib/neurolink.d.ts +4 -1
  44. package/dist/lib/neurolink.js +108 -69
  45. package/dist/lib/providers/anthropic.js +3 -0
  46. package/dist/lib/providers/googleAiStudio.js +13 -0
  47. package/dist/lib/providers/huggingFace.js +15 -3
  48. package/dist/lib/providers/mistral.js +19 -7
  49. package/dist/lib/providers/ollama.js +31 -7
  50. package/dist/lib/providers/openAI.js +12 -0
  51. package/dist/lib/sdk/toolRegistration.js +17 -0
  52. package/dist/lib/types/cli.d.ts +56 -1
  53. package/dist/lib/types/contextTypes.d.ts +110 -0
  54. package/dist/lib/types/contextTypes.js +176 -0
  55. package/dist/lib/types/index.d.ts +4 -1
  56. package/dist/lib/types/mcpTypes.d.ts +118 -7
  57. package/dist/lib/types/providers.d.ts +81 -0
  58. package/dist/lib/types/streamTypes.d.ts +44 -7
  59. package/dist/lib/types/tools.d.ts +9 -0
  60. package/dist/lib/types/universalProviderOptions.d.ts +3 -1
  61. package/dist/lib/types/universalProviderOptions.js +2 -1
  62. package/dist/lib/utils/logger.d.ts +7 -0
  63. package/dist/lib/utils/logger.js +16 -6
  64. package/dist/lib/utils/performance.d.ts +105 -0
  65. package/dist/lib/utils/performance.js +210 -0
  66. package/dist/lib/utils/providerUtils.js +9 -2
  67. package/dist/lib/utils/retryHandler.d.ts +89 -0
  68. package/dist/lib/utils/retryHandler.js +269 -0
  69. package/dist/mcp/toolRegistry.d.ts +5 -0
  70. package/dist/mcp/toolRegistry.js +60 -0
  71. package/dist/models/modelRegistry.d.ts +132 -0
  72. package/dist/models/modelRegistry.js +483 -0
  73. package/dist/models/modelResolver.d.ts +115 -0
  74. package/dist/models/modelResolver.js +468 -0
  75. package/dist/neurolink.d.ts +4 -1
  76. package/dist/neurolink.js +108 -69
  77. package/dist/providers/anthropic.js +3 -0
  78. package/dist/providers/googleAiStudio.js +13 -0
  79. package/dist/providers/huggingFace.js +15 -3
  80. package/dist/providers/mistral.js +19 -7
  81. package/dist/providers/ollama.js +31 -7
  82. package/dist/providers/openAI.js +12 -0
  83. package/dist/sdk/toolRegistration.js +17 -0
  84. package/dist/types/cli.d.ts +56 -1
  85. package/dist/types/contextTypes.d.ts +110 -0
  86. package/dist/types/contextTypes.js +177 -0
  87. package/dist/types/index.d.ts +4 -1
  88. package/dist/types/mcpTypes.d.ts +118 -7
  89. package/dist/types/providers.d.ts +81 -0
  90. package/dist/types/streamTypes.d.ts +44 -7
  91. package/dist/types/tools.d.ts +9 -0
  92. package/dist/types/universalProviderOptions.d.ts +3 -1
  93. package/dist/types/universalProviderOptions.js +3 -1
  94. package/dist/utils/logger.d.ts +7 -0
  95. package/dist/utils/logger.js +16 -6
  96. package/dist/utils/performance.d.ts +105 -0
  97. package/dist/utils/performance.js +210 -0
  98. package/dist/utils/providerUtils.js +9 -2
  99. package/dist/utils/retryHandler.d.ts +89 -0
  100. package/dist/utils/retryHandler.js +269 -0
  101. package/package.json +2 -1
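
Before the per-file diffs, the file list above already tells the story of this release: new `mcp` and `models` CLI command groups, a model registry and resolver, stream analytics, a retry handler, and performance utilities, while the inline command code in dist/cli/index.js moves into CLICommandFactory. As a rough orientation, a minimal programmatic call against the SDK looks like the sketch below; the option names are copied from the `sdk.generate()` call in the removed CLI handler further down, and the import specifier is assumed from the package name.

import { NeuroLink } from "@juspay/neurolink";

const sdk = new NeuroLink();

// Option names mirror the sdk.generate() call in the removed CLI handler below.
const result = await sdk.generate({
    input: { text: "Write a haiku about version diffs" },
    provider: "openai", // or leave undefined to auto-select the best available provider
    temperature: 0.7,
    maxTokens: 1000,
    enableAnalytics: true, // populates result.analytics when available
    enableEvaluation: true, // populates result.evaluation when available
});

// The CLI reads result.text || result.content, plus result.usage and result.responseTime.
console.log(result.content);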
package/dist/cli/index.js CHANGED
@@ -1,101 +1,16 @@
  #!/usr/bin/env node
- // CRITICAL: Set MCP logging level before ANY imports
- if (!process.argv.includes("--debug")) {
-     process.env.MCP_LOG_LEVEL = "error"; // Only show MCP errors unless debugging
- }
- else {
-     process.env.MCP_LOG_LEVEL = "info"; // Show MCP logs when debugging
- }
  /**
-  * NeuroLink CLI - Enhanced Simplified Approach
+  * NeuroLink CLI
   *
   * Professional CLI experience with minimal maintenance overhead.
   * Features: Spinners, colors, batch processing, provider testing, rich help
-  * Implementation: ~300 lines using simple JS utility functions
   */
- import { NeuroLink } from "../lib/neurolink.js";
  import yargs from "yargs";
  import { hideBin } from "yargs/helpers";
- import ora from "ora";
  import chalk from "chalk";
- import fs from "fs";
  import { addOllamaCommands } from "./commands/ollama.js";
+ import { CLICommandFactory } from "./factories/commandFactory.js";
  import { logger } from "../lib/utils/logger.js";
- function displayDebugInfo(title, data, debug) {
-     if (debug) {
-         console.log(chalk.blue(title));
-         console.log(JSON.stringify(data, null, 2));
-         console.log();
-     }
- }
- function displayMissingDataWarning(type) {
-     console.log();
-     console.log(chalk.red(`⚠️ ${type} enabled but no data received`));
-     console.log();
- }
- function formatAnalytics(analytics) {
-     console.log();
-     console.log(chalk.blue("📊 Analytics:"));
-     console.log(` 🚀 Provider: ${analytics.provider}`);
-     console.log(` 🤖 Model: ${analytics.model}`);
-     if (analytics.tokens) {
-         const tokens = analytics.tokens;
-         console.log(` 💬 Tokens: ${tokens.totalTokens || tokens.total || "unknown"}`);
-     }
-     console.log(` ⏱️ Response Time: ${analytics.responseTime}ms`);
-     if (analytics.context) {
-         const context = analytics.context;
-         console.log(` 📋 Context: ${Object.keys(context).length} fields`);
-     }
-     console.log();
- }
- function formatEvaluation(evaluation) {
-     console.log();
-     console.log(chalk.blue("⭐ Response Quality Evaluation:"));
-     console.log(` 📊 Scores: Relevance ${evaluation.relevanceScore || evaluation.relevance}/10, Accuracy ${evaluation.accuracyScore || evaluation.accuracy}/10, Completeness ${evaluation.completenessScore || evaluation.completeness}/10`);
-     console.log(` 🎯 Overall Quality: ${evaluation.overall}/10`);
-     const severity = evaluation.alertSeverity || "none";
-     const severityColors = {
-         high: chalk.red,
-         medium: chalk.yellow,
-         low: chalk.blue,
-         none: chalk.green,
-     };
-     const severityColor = severityColors[severity] || chalk.gray;
-     console.log(` 🚨 Alert Level: ${severityColor(severity)}`);
-     if (evaluation.reasoning) {
-         console.log(` 💭 Analysis: ${evaluation.reasoning}`);
-     }
-     if (evaluation.suggestedImprovements) {
-         console.log(` 💡 Improvements: ${evaluation.suggestedImprovements}`);
-     }
-     const evalModel = evaluation.evaluationModel || "unknown";
-     const evalTime = evaluation.evaluationTime
-         ? `${evaluation.evaluationTime}ms`
-         : "unknown";
-     console.log(` 🤖 Evaluated by: ${evalModel} (${evalTime})`);
-     console.log();
- }
- function displayAnalyticsAndEvaluation(result, argv) {
-     if (result && result.analytics) {
-         displayDebugInfo("📊 Analytics:", result.analytics, argv.debug);
-         if (!argv.debug) {
-             formatAnalytics(result.analytics);
-         }
-     }
-     else if (argv.enableAnalytics) {
-         displayMissingDataWarning("Analytics");
-     }
-     if (result && result.evaluation) {
-         displayDebugInfo("⭐ Response Evaluation:", result.evaluation, argv.debug);
-         if (!argv.debug) {
-             formatEvaluation(result.evaluation);
-         }
-     }
-     else if (argv.enableEvaluation) {
-         displayMissingDataWarning("Evaluation");
-     }
- }
  // Load environment variables from .env file
  try {
      // Try to import and configure dotenv
@@ -167,39 +82,27 @@ function handleError(error, context)
          "Authorization error: You are not authorized to perform this action or access this resource.";
      }
      // If no specific condition matched, genericMessage remains error.message
-     console.error(chalk.red(`❌ ${context} failed: ${genericMessage}`));
+     logger.error(chalk.red(`❌ ${context} failed: ${genericMessage}`));
      // Smart hints for common errors (just string matching!)
      if (genericMessage.toLowerCase().includes("api key") ||
          genericMessage.toLowerCase().includes("credential")) {
-         console.error(chalk.yellow("💡 Set Google AI Studio API key (RECOMMENDED): export GOOGLE_AI_API_KEY=AIza-..."));
-         console.error(chalk.yellow("💡 Or set OpenAI API key: export OPENAI_API_KEY=sk-..."));
-         console.error(chalk.yellow("💡 Or set AWS Bedrock credentials: export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_REGION=us-east-1"));
-         console.error(chalk.yellow("💡 Or set Google Vertex AI credentials: export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json"));
-         console.error(chalk.yellow("💡 Or set Anthropic API key: export ANTHROPIC_API_KEY=sk-ant-..."));
-         console.error(chalk.yellow("💡 Or set Azure OpenAI credentials: export AZURE_OPENAI_API_KEY=... AZURE_OPENAI_ENDPOINT=..."));
+         logger.error(chalk.yellow("💡 Set Google AI Studio API key (RECOMMENDED): export GOOGLE_AI_API_KEY=AIza-..."));
+         logger.error(chalk.yellow("💡 Or set OpenAI API key: export OPENAI_API_KEY=sk-..."));
+         logger.error(chalk.yellow("💡 Or set AWS Bedrock credentials: export AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... AWS_REGION=us-east-1"));
+         logger.error(chalk.yellow("💡 Or set Google Vertex AI credentials: export GOOGLE_APPLICATION_CREDENTIALS=/path/to/key.json"));
+         logger.error(chalk.yellow("💡 Or set Anthropic API key: export ANTHROPIC_API_KEY=sk-ant-..."));
+         logger.error(chalk.yellow("💡 Or set Azure OpenAI credentials: export AZURE_OPENAI_API_KEY=... AZURE_OPENAI_ENDPOINT=..."));
      }
      if (error.message.toLowerCase().includes("rate limit")) {
-         console.error(chalk.yellow("💡 Try again in a few moments or use --provider vertex"));
+         logger.error(chalk.yellow("💡 Try again in a few moments or use --provider vertex"));
      }
      if (error.message.toLowerCase().includes("not authorized") ||
          error.message.toLowerCase().includes("permission denied")) {
-         console.error(chalk.yellow("💡 Check your account permissions for the selected model/service."));
-         console.error(chalk.yellow("💡 For AWS Bedrock, ensure you have permissions for the specific model and consider using inference profile ARNs."));
+         logger.error(chalk.yellow("💡 Check your account permissions for the selected model/service."));
+         logger.error(chalk.yellow("💡 For AWS Bedrock, ensure you have permissions for the specific model and consider using inference profile ARNs."));
      }
      process.exit(1);
  }
- // Initialize MCP system for CLI with manual config enabled
- async function initializeCLI() {
-     // Import and configure for CLI mode
-     const { ProviderRegistry } = await import("../lib/factories/providerRegistry.js");
-     // Enable manual MCP only for CLI
-     ProviderRegistry.setOptions({
-         enableManualMCP: true,
-     });
-     logger.debug("CLI initialized with manual MCP support enabled");
- }
- // Initialize SDK
- const sdk = new NeuroLink();
  // Manual pre-validation for unknown flags
  const args = hideBin(process.argv);
  // Enhanced CLI with Professional UX
@@ -216,15 +119,21 @@ const cli = yargs(args)
      .epilogue("For more info: https://github.com/juspay/neurolink")
      .showHelpOnFail(true, "Specify --help for available options")
      .middleware((argv) => {
+         // Handle no-color option globally
+         if (argv.noColor || process.env.NO_COLOR || !process.stdout.isTTY) {
+             process.env.FORCE_COLOR = "0";
+         }
+         // Handle custom config file
+         if (argv.configFile) {
+             process.env.NEUROLINK_CONFIG_FILE = argv.configFile;
+         }
          // Control SDK logging based on debug flag
          if (argv.debug) {
              process.env.NEUROLINK_DEBUG = "true";
-             process.env.MCP_LOG_LEVEL = "info"; // Show MCP logs in debug mode
          }
          else {
              // Always set to false when debug is not enabled (including when not provided)
              process.env.NEUROLINK_DEBUG = "false";
-             process.env.MCP_LOG_LEVEL = "error"; // Hide MCP info logs when not debugging
          }
          // Keep existing quiet middleware
          if (process.env.NEUROLINK_QUIET === "true" &&
@@ -295,899 +204,33 @@ const cli = yargs(args)
          }
          exitProcess(); // Default exit
      })
-     // Generate Command (Primary)
-     .command(["generate [prompt]", "gen [prompt]"], "Generate content using AI providers", (yargsInstance) => yargsInstance
-         .usage("Usage: $0 generate [prompt] [options]")
-         .positional("prompt", {
-             type: "string",
-             description: "Text prompt for AI generation (or read from stdin)",
-         })
-         .option("provider", {
-             choices: [
-                 "auto",
-                 "openai",
-                 "bedrock",
-                 "vertex",
-                 "googleVertex",
-                 "anthropic",
-                 "azure",
-                 "google-ai",
-                 "huggingface",
-                 "ollama",
-                 "mistral",
-             ],
-             default: "auto",
-             description: "AI provider to use (auto-selects best available)",
-         })
-         .option("temperature", {
-             type: "number",
-             default: 0.7,
-             description: "Creativity level (0.0 = focused, 1.0 = creative)",
-         })
-         .option("max-tokens", {
-             type: "number",
-             default: 1000,
-             description: "Maximum tokens to generate",
-         })
-         .option("system", {
-             type: "string",
-             description: "System prompt to guide AI behavior",
-         })
-         .option("format", {
-             choices: ["text", "json"],
-             default: "text",
-             alias: "f",
-             description: "Output format",
-         })
-         .option("debug", {
-             type: "boolean",
-             default: false,
-             description: "Enable debug mode with verbose output",
-         }) // Kept for potential specific debug logic
-         .option("model", {
-             type: "string",
-             description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
-         })
-         .option("timeout", {
-             type: "number",
-             default: 120,
-             description: "Maximum execution time in seconds (default: 120)",
-         })
-         .option("disable-tools", {
-             type: "boolean",
-             default: false,
-             description: "Disable MCP tool integration (tools enabled by default)",
-         })
-         .option("enable-analytics", {
-             type: "boolean",
-             default: false,
-             description: "Enable usage analytics collection",
-         })
-         .option("enable-evaluation", {
-             type: "boolean",
-             default: false,
-             description: "Enable AI response quality evaluation",
-         })
-         .option("evaluation-domain", {
-             type: "string",
-             description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
-         })
-         .option("tool-usage-context", {
-             type: "string",
-             description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
-         })
-         .option("lighthouse-style", {
-             type: "boolean",
-             default: false,
-             description: "Use Lighthouse-compatible domain-aware evaluation",
-         })
-         .option("context", {
-             type: "string",
-             description: "JSON context object for custom data",
-         })
-         .example('$0 generate "Hello world"', "Basic content generation")
-         .example('$0 generate "Write a story" --provider openai', "Use specific provider")
-         .example('$0 generate "What time is it?"', "Use with natural tool integration (default)")
-         .example('$0 generate "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
-         // SOLUTION 1: Handle stdin input if no prompt provided
-         if (!argv.prompt && !process.stdin.isTTY) {
-             // Read from stdin
-             let stdinData = "";
-             process.stdin.setEncoding("utf8");
-             for await (const chunk of process.stdin) {
-                 stdinData += chunk;
-             }
-             argv.prompt = stdinData.trim();
-             if (!argv.prompt) {
-                 throw new Error("No input received from stdin");
-             }
-         }
-         else if (!argv.prompt) {
-             throw new Error('Prompt required. Use: neurolink generate "your prompt" or echo "prompt" | neurolink generate');
-         }
-         // SOLUTION 2: Parameter validation
-         const errors = [];
-         // Validate max-tokens
-         if (argv.maxTokens !== undefined) {
-             if (!Number.isInteger(argv.maxTokens) || argv.maxTokens < 1) {
-                 errors.push(`max-tokens must be a positive integer >= 1, got: ${argv.maxTokens}`);
-             }
-             if (argv.maxTokens > 100000) {
-                 errors.push(`max-tokens too large (>100000), got: ${argv.maxTokens}`);
-             }
-         }
-         // Validate temperature
-         if (argv.temperature !== undefined) {
-             if (typeof argv.temperature !== "number" ||
-                 argv.temperature < 0 ||
-                 argv.temperature > 1) {
-                 errors.push(`temperature must be between 0.0 and 1.0, got: ${argv.temperature}`);
-             }
-         }
-         // Validate timeout
-         if (argv.timeout !== undefined) {
-             if (!Number.isInteger(argv.timeout) || argv.timeout < 1) {
-                 errors.push(`timeout must be a positive integer >= 1 second, got: ${argv.timeout}`);
-             }
-             if (argv.timeout > 600) {
-                 errors.push(`timeout too large (>600s), got: ${argv.timeout}s`);
-             }
-         }
-         if (errors.length > 0) {
-             throw new Error(`Parameter validation failed:\n${errors.map((e) => ` • ${e}`).join("\n")}\n\nUse --help for valid parameter ranges.`);
-         }
-         // Command is now the primary generate method
-         let originalConsole = {};
-         if ((argv.format === "json" || argv.outputFormat === "json") &&
-             !argv.quiet) {
-             // Suppress only if not quiet, as quiet implies no spinners anyway
-             originalConsole = { ...console };
-             Object.keys(originalConsole).forEach((key) => {
-                 if (typeof console[key] === "function") {
-                     console[key] = () => { };
-                 }
-             });
-         }
-         const spinner = argv.outputFormat === "json" || argv.format === "json" || argv.quiet
-             ? null
-             : ora("🤖 Generating text...").start();
-         try {
-             // CRITICAL: Add master timeout to prevent infinite hangs
-             const cliTimeout = argv.timeout ? argv.timeout * 1000 : 120000; // Default 2 minutes
-             const timeoutPromise = new Promise((_, reject) => {
-                 setTimeout(() => {
-                     reject(new Error(`CLI operation timed out after ${cliTimeout / 1000} seconds. Use --timeout to adjust.`));
-                 }, cliTimeout);
-             });
-             // Parse context if provided
-             let contextObj;
-             if (argv.context) {
-                 try {
-                     contextObj = JSON.parse(argv.context);
-                 }
-                 catch {
-                     throw new Error("Invalid JSON provided for --context option");
-                 }
-             }
-             // Use standard SDK for all generation - tools are now built into BaseProvider
-             const generatePromise = sdk.generate({
-                 input: { text: argv.prompt },
-                 provider: argv.provider === "auto"
-                     ? undefined
-                     : argv.provider,
-                 model: argv.model,
-                 temperature: argv.temperature,
-                 maxTokens: argv.maxTokens,
-                 systemPrompt: argv.system,
-                 timeout: argv.timeout,
-                 disableTools: argv.disableTools === true, // Tools are enabled by default
-                 // NEW: Analytics and evaluation support
-                 enableAnalytics: argv.enableAnalytics,
-                 enableEvaluation: argv.enableEvaluation,
-                 context: contextObj,
-                 // NEW: Lighthouse-compatible domain-aware evaluation
-                 evaluationDomain: argv.evaluationDomain,
-                 toolUsageContext: argv.toolUsageContext,
-             });
-             // Wrap generation with master timeout to prevent infinite hangs
-             const result = await Promise.race([generatePromise, timeoutPromise]);
-             if (argv.format === "json" && originalConsole.log) {
-                 Object.assign(console, originalConsole);
-             }
-             if (spinner) {
-                 spinner.succeed(chalk.green("✅ Text generated successfully!"));
-             }
-             const typedResult = result;
-             const responseText = typedResult?.text || typedResult?.content || "";
-             const responseUsage = typedResult?.usage || {
-                 promptTokens: 0,
-                 completionTokens: 0,
-                 totalTokens: 0,
-             };
-             if (argv.format === "json" || argv.outputFormat === "json") {
-                 // CLI debug removed - analytics and evaluation now working correctly
-                 const jsonOutput = {
-                     content: responseText,
-                     provider: typedResult?.provider || argv.provider,
-                     usage: responseUsage,
-                     responseTime: typedResult?.responseTime || 0,
-                     toolCalls: typedResult?.toolCalls || [],
-                     toolResults: typedResult?.toolResults || [],
-                 };
-                 // Include analytics if present
-                 if (typedResult?.analytics) {
-                     jsonOutput.analytics = typedResult.analytics;
-                 }
-                 // Include evaluation if present
-                 if (typedResult?.evaluation) {
-                     jsonOutput.evaluation = typedResult.evaluation;
-                 }
-                 process.stdout.write(JSON.stringify(jsonOutput, null, 2) + "\n");
-             }
-             else if (argv.debug) {
-                 // Debug mode: Show AI response + full metadata
-                 if (responseText) {
-                     console.log("\n" + responseText + "\n");
-                 }
-                 // Show tool calls if any
-                 const typedResultForTools = result;
-                 if (typedResultForTools &&
-                     typedResultForTools.toolCalls &&
-                     typedResultForTools.toolCalls.length > 0) {
-                     console.log(chalk.blue("🔧 Tools Called:"));
-                     for (const toolCall of typedResultForTools.toolCalls) {
-                         const toolCallObj = toolCall;
-                         console.log(`- ${toolCallObj.toolName}`);
-                         console.log(` Args: ${JSON.stringify(toolCallObj.args)}`);
-                     }
-                     console.log();
-                 }
-                 // Show tool results if any
-                 if (typedResultForTools &&
-                     typedResultForTools.toolResults &&
-                     typedResultForTools.toolResults.length > 0) {
-                     console.log(chalk.blue("📋 Tool Results:"));
-                     for (const toolResult of typedResultForTools.toolResults) {
-                         const toolResultObj = toolResult;
-                         console.log(`- ${toolResultObj.toolCallId}`);
-                         console.log(` Result: ${JSON.stringify(toolResultObj.result).substring(0, 200)}...`);
-                     }
-                     console.log();
-                 }
-                 // DEBUG: Show what's in the result object
-                 if (argv.debug) {
-                     logger.debug("Result object keys:", {
-                         keys: Object.keys(result || {}),
-                     });
-                     logger.debug("Enhancement status:", {
-                         hasAnalytics: !!(result && result.analytics),
-                         hasEvaluation: !!(result && result.evaluation),
-                         enableAnalytics: argv.enableAnalytics,
-                         enableEvaluation: argv.enableEvaluation,
-                         hasContext: !!contextObj,
-                     });
-                 }
-                 // Show analytics and evaluation if enabled
-                 displayAnalyticsAndEvaluation(result, argv);
-                 console.log(JSON.stringify({
-                     provider: result
-                         ? result.provider || argv.provider
-                         : argv.provider,
-                     usage: responseUsage,
-                     responseTime: result
-                         ? result.responseTime || 0
-                         : 0,
-                 }, null, 2));
-                 if (responseUsage.totalTokens) {
-                     console.log(chalk.blue(`ℹ️ ${responseUsage.totalTokens} tokens used`));
-                 }
-             }
-             else {
-                 // Default mode: Clean AI response only
-                 if (responseText) {
-                     console.log(responseText);
-                 }
-                 // Show analytics and evaluation if enabled
-                 displayAnalyticsAndEvaluation(result, argv);
-             }
-             // Explicitly exit to prevent hanging, especially with Google AI Studio
-             process.exit(0);
-         }
-         catch (error) {
-             if (argv.format === "json" && originalConsole.log) {
-                 Object.assign(console, originalConsole);
-             }
-             if (spinner) {
-                 spinner.fail();
-             }
-             if (argv.format === "json") {
-                 process.stdout.write(JSON.stringify({ error: error.message, success: false }, null, 2) + "\n");
-                 process.exit(1);
-             }
-             else {
-                 handleError(error, "Text generation");
-             }
-         }
-     })
-     // Stream Text Command
-     .command("stream [prompt]", "Stream generation in real-time", (yargsInstance) => yargsInstance
-         .usage("Usage: $0 stream [prompt] [options]")
-         .positional("prompt", {
-             type: "string",
-             description: "Text prompt for streaming (or read from stdin)",
-         })
-         .option("provider", {
-             choices: [
-                 "auto",
-                 "openai",
-                 "bedrock",
-                 "vertex",
-                 "googleVertex",
-                 "anthropic",
-                 "azure",
-                 "google-ai",
-                 "huggingface",
-                 "ollama",
-                 "mistral",
-             ],
-             default: "auto",
-             description: "AI provider to use",
-         })
-         .option("temperature", {
-             type: "number",
-             default: 0.7,
-             description: "Creativity level",
-         })
-         .option("max-tokens", {
-             type: "number",
-             description: "Maximum number of tokens to generate",
-         })
-         .option("timeout", {
-             type: "string",
-             default: "2m",
-             description: "Timeout for streaming (e.g., 30s, 2m, 1h)",
-         })
-         .option("model", {
-             type: "string",
-             description: "Specific model to use (e.g., gemini-2.5-pro, gemini-2.5-flash)",
-         })
-         .option("debug", {
-             type: "boolean",
-             default: false,
-             description: "Enable debug mode with interleaved logging",
-         })
-         .option("disable-tools", {
-             type: "boolean",
-             default: false,
-             description: "Disable MCP tool integration (tools enabled by default)",
-         })
-         .option("enable-analytics", {
-             type: "boolean",
-             default: false,
-             description: "Enable usage analytics collection",
-         })
-         .option("enable-evaluation", {
-             type: "boolean",
-             default: false,
-             description: "Enable AI response quality evaluation",
-         })
-         .option("evaluation-domain", {
-             type: "string",
-             description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
-         })
-         .option("tool-usage-context", {
-             type: "string",
-             description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
-         })
-         .option("lighthouse-style", {
-             type: "boolean",
-             default: false,
-             description: "Use Lighthouse-compatible domain-aware evaluation",
-         })
-         .option("context", {
-             type: "string",
-             description: "JSON context object for custom data",
-         })
-         .example('$0 stream "Tell me a story"', "Stream a story in real-time")
-         .example('$0 stream "What time is it?"', "Stream with natural tool integration (default)")
-         .example('$0 stream "Tell me a story" --disable-tools', "Stream without tool integration"), async (argv) => {
-         // SOLUTION 1: Handle stdin input if no prompt provided
-         if (!argv.prompt && !process.stdin.isTTY) {
-             // Read from stdin
-             let stdinData = "";
-             process.stdin.setEncoding("utf8");
-             for await (const chunk of process.stdin) {
-                 stdinData += chunk;
-             }
-             argv.prompt = stdinData.trim();
-             if (!argv.prompt) {
-                 throw new Error("No input received from stdin");
-             }
-         }
-         else if (!argv.prompt) {
-             throw new Error('Prompt required. Use: neurolink stream "your prompt" or echo "prompt" | neurolink stream');
-         }
-         // Default mode: Simple streaming message
-         // Debug mode: More detailed information
-         if (!argv.quiet && !argv.debug) {
-             console.log(chalk.blue("🔄 Streaming..."));
-         }
-         else if (!argv.quiet && argv.debug) {
-             console.log(chalk.blue(`🔄 Streaming from ${argv.provider} provider with debug logging...\n`));
-         }
-         try {
-             // Parse context if provided
-             let contextObj;
-             if (argv.context) {
-                 try {
-                     contextObj = JSON.parse(argv.context);
-                 }
-                 catch {
-                     throw new Error("Invalid JSON provided for --context option");
-                 }
-             }
-             // Use standard SDK streaming - tools are handled automatically
-             const sdk = new NeuroLink();
-             const stream = await sdk.stream({
-                 input: { text: argv.prompt },
-                 provider: argv.provider === "auto"
-                     ? undefined
-                     : argv.provider,
-                 model: argv.model,
-                 temperature: argv.temperature,
-                 timeout: argv.timeout,
-                 disableTools: argv.disableTools === true, // Tools are enabled by default
-                 // NEW: Analytics and evaluation support
-                 enableAnalytics: argv.enableAnalytics,
-                 enableEvaluation: argv.enableEvaluation,
-                 context: contextObj,
-             });
-             // Process the stream
-             for await (const chunk of stream.stream) {
-                 process.stdout.write(chunk.content);
-             }
-             if (!argv.quiet) {
-                 process.stdout.write("\n");
-             }
-             // Clean exit for tools-disabled streaming
-             process.exit(0);
-         }
-         catch (error) {
-             handleError(error, "Text streaming");
-         }
-     })
-     // Batch Processing Command
-     .command("batch <file>", "Process multiple prompts from a file", (yargsInstance) => yargsInstance
-         .usage("Usage: $0 batch <file> [options]")
-         .positional("file", {
-             type: "string",
-             description: "File with prompts (one per line)",
-             demandOption: true,
-         })
-         .option("output", {
-             type: "string",
-             description: "Output file for results (default: stdout)",
-         })
-         .option("delay", {
-             type: "number",
-             default: 1000,
-             description: "Delay between requests in milliseconds",
-         })
-         .option("provider", {
-             choices: [
-                 "auto",
-                 "openai",
-                 "bedrock",
-                 "vertex",
-                 "googleVertex",
-                 "anthropic",
-                 "azure",
-                 "google-ai",
-                 "huggingface",
-                 "ollama",
-                 "mistral",
-             ],
-             default: "auto",
-             description: "AI provider to use",
-         })
-         .option("timeout", {
-             type: "string",
-             default: "30s",
-             description: "Timeout for each request (e.g., 30s, 2m, 1h)",
-         })
-         .option("temperature", {
-             type: "number",
-             description: "Global temperature for batch jobs",
-         })
-         .option("max-tokens", {
-             type: "number",
-             description: "Global max tokens for batch jobs",
-         })
-         .option("system", {
-             type: "string",
-             description: "Global system prompt for batch jobs",
-         })
-         .option("debug", {
-             type: "boolean",
-             default: false,
-             description: "Enable debug mode with detailed per-item logging",
-         })
-         .example("$0 batch prompts.txt --output results.json", "Process and save to file"), async (argv) => {
-         const spinner = argv.quiet ? null : ora().start();
-         try {
-             if (!fs.existsSync(argv.file)) {
-                 throw new Error(`File not found: ${argv.file}`);
-             }
-             const buffer = fs.readFileSync(argv.file);
-             const isLikelyBinary = buffer.includes(0) ||
-                 buffer.toString("hex", 0, 100).includes("0000") ||
-                 (!buffer.toString("utf8", 0, 1024).includes("\n") &&
-                     buffer.length > 512);
-             if (isLikelyBinary) {
-                 throw new Error(`Invalid file format: Binary file detected at "${argv.file}". Batch processing requires a plain text file.`);
-             }
-             const prompts = buffer
-                 .toString("utf8")
-                 .split("\n")
-                 .map((line) => line.trim())
-                 .filter(Boolean);
-             if (prompts.length === 0) {
-                 throw new Error("No prompts found in file");
-             }
-             if (spinner) {
-                 spinner.text = `📦 Processing ${prompts.length} prompts...`;
-             }
-             else if (!argv.quiet) {
-                 console.log(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
-             }
-             const results = [];
-             for (let i = 0; i < prompts.length; i++) {
-                 if (spinner) {
-                     spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
-                 }
-                 try {
-                     const result = await sdk.generate({
-                         input: { text: prompts[i] },
-                         provider: argv.provider === "auto"
-                             ? undefined
-                             : argv.provider,
-                         temperature: argv.temperature,
-                         maxTokens: argv.maxTokens,
-                         systemPrompt: argv.system,
-                         timeout: argv.timeout,
-                     });
-                     results.push({ prompt: prompts[i], response: result.content });
-                     if (spinner) {
-                         spinner.render();
-                     } // Update spinner without changing text
-                 }
-                 catch (error) {
-                     results.push({
-                         prompt: prompts[i],
-                         error: error.message,
-                     });
-                     if (spinner) {
-                         spinner.render();
-                     }
-                 }
-                 if (argv.delay && i < prompts.length - 1) {
-                     await new Promise((resolve) => setTimeout(resolve, argv.delay));
-                 }
-             }
-             if (spinner) {
-                 spinner.succeed(chalk.green("✅ Batch processing complete!"));
-             }
-             const outputData = JSON.stringify(results, null, 2);
-             if (argv.output) {
-                 fs.writeFileSync(argv.output, outputData);
-                 if (!argv.quiet) {
-                     console.log(chalk.green(`\n✅ Results saved to ${argv.output}`));
-                 }
-             }
-             else {
-                 process.stdout.write(outputData + "\n");
-             }
-         }
-         catch (error) {
-             if (spinner) {
-                 spinner.fail();
-             }
-             handleError(error, "Batch processing");
-         }
-     })
-     // Provider Command Group (Corrected Structure)
-     .command("provider <subcommand>", "Manage AI provider configurations and status", (yargsProvider) => {
-         // Builder for the main 'provider' command
-         yargsProvider
-             .usage("Usage: $0 provider <subcommand> [options]") // Add usage here
-             .command("status", "Check status of all configured AI providers", (y) => y
-                 .usage("Usage: $0 provider status [options]")
-                 .option("verbose", {
-                     type: "boolean",
-                     alias: "v",
-                     description: "Show detailed information",
-                 }) // Default is handled by middleware if NEUROLINK_DEBUG is set
-                 .example("$0 provider status", "Check all providers")
-                 .example("$0 provider status --verbose", "Show detailed status information"), async (argv) => {
-                 if (argv.verbose && !argv.quiet) {
-                     console.log(chalk.yellow("ℹ️ Verbose mode enabled. Displaying detailed status.\n")); // Added newline
-                 }
-                 const spinner = argv.quiet
-                     ? null
-                     : ora("🔍 Checking AI provider status...\n").start();
-                 const providers = [
-                     "openai",
-                     "bedrock",
-                     "vertex",
-                     "googleVertex",
-                     "anthropic",
-                     "azure",
-                     "google-ai",
-                     "huggingface",
-                     "ollama",
-                     "mistral",
-                 ];
-                 // Import hasProviderEnvVars to check environment variables
-                 const { hasProviderEnvVars } = await import("../lib/utils/providerUtils.js");
-                 const results = [];
-                 for (const p of providers) {
-                     if (spinner) {
-                         spinner.text = `Testing ${p}...`;
-                     }
-                     // First check if provider has env vars configured
-                     const hasEnvVars = hasProviderEnvVars(p);
-                     if (!hasEnvVars && p !== "ollama") {
-                         // No env vars, don't even try to test
-                         results.push({
-                             provider: p,
-                             status: "not-configured",
-                             configured: false,
-                             error: "Missing required environment variables",
-                         });
-                         if (spinner) {
-                             spinner.fail(`${p}: ${chalk.gray("⚪ Not configured")} - Missing environment variables`);
-                         }
-                         else if (!argv.quiet) {
-                             console.log(`${p}: ${chalk.gray("⚪ Not configured")} - Missing environment variables`);
-                         }
-                         continue;
-                     }
-                     // Special handling for Ollama
-                     if (p === "ollama") {
-                         try {
-                             // First, check if the service is running
-                             const serviceResponse = await fetch("http://localhost:11434/api/tags", {
-                                 method: "GET",
-                                 signal: AbortSignal.timeout(2000),
-                             });
-                             if (!serviceResponse.ok) {
-                                 throw new Error("Ollama service not responding");
-                             }
-                             // Service is running, now check if the default model is available
-                             const { models } = await serviceResponse.json();
-                             const defaultOllamaModel = "llama3.2:latest";
-                             const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
-                             if (modelIsAvailable) {
-                                 results.push({
-                                     provider: p,
-                                     status: "working",
-                                     configured: true,
-                                     authenticated: true,
-                                     responseTime: 0,
-                                 });
-                                 if (spinner) {
-                                     spinner.succeed(`${p}: ${chalk.green("✅ Working")} - Service running and model '${defaultOllamaModel}' is available.`);
-                                 }
-                             }
-                             else {
-                                 results.push({
-                                     provider: p,
-                                     status: "failed",
-                                     configured: true,
-                                     authenticated: false,
-                                     error: `Ollama service is running, but model '${defaultOllamaModel}' is not found. Please run 'ollama pull ${defaultOllamaModel}'.`,
-                                 });
-                                 if (spinner) {
-                                     spinner.fail(`${p}: ${chalk.red("❌ Model Not Found")} - Run 'ollama pull ${defaultOllamaModel}'`);
-                                 }
-                             }
-                         }
-                         catch (error) {
-                             results.push({
-                                 provider: p,
-                                 status: "failed",
-                                 configured: false,
-                                 authenticated: false,
-                                 error: "Ollama is not running. Please start with: ollama serve",
-                             });
-                             if (spinner) {
-                                 spinner.fail(`${p}: ${chalk.red("❌ Failed")} - Service not running`);
-                             }
-                         }
-                         continue;
-                     }
-                     // Provider has env vars, now test authentication
-                     try {
-                         const start = Date.now();
-                         // Add timeout to prevent hanging
-                         const testPromise = sdk.generate({
-                             input: { text: "test" },
-                             provider: p,
-                             maxTokens: 1,
-                             disableTools: true, // Disable tools for faster status check
-                         });
-                         const timeoutPromise = new Promise((_, reject) => {
-                             setTimeout(() => reject(new Error("Provider test timeout (5s)")), 5000);
-                         });
-                         await Promise.race([testPromise, timeoutPromise]);
-                         const duration = Date.now() - start;
-                         results.push({
-                             provider: p,
-                             status: "working",
-                             configured: true,
-                             authenticated: true,
-                             responseTime: duration,
-                         });
-                         if (spinner) {
-                             spinner.succeed(`${p}: ${chalk.green("✅ Working")} (${duration}ms)`);
-                         }
-                         else if (!argv.quiet) {
-                             console.log(`${p}: ${chalk.green("✅ Working")} (${duration}ms)`);
-                         }
-                     }
-                     catch (error) {
-                         const errorMsg = error.message.includes("timeout")
-                             ? "Connection timeout"
-                             : error.message.split("\n")[0];
-                         results.push({
-                             provider: p,
-                             status: "failed",
-                             configured: true,
-                             authenticated: false,
-                             error: errorMsg,
-                         });
-                         if (spinner) {
-                             spinner.fail(`${p}: ${chalk.red("❌ Failed")} - ${errorMsg}`);
-                         }
-                         else if (!argv.quiet) {
-                             console.error(`${p}: ${chalk.red("❌ Failed")} - ${errorMsg}`);
-                         }
-                     }
-                 }
-                 const working = results.filter((r) => r.status === "working").length;
-                 const configured = results.filter((r) => r.configured).length;
-                 if (spinner) {
-                     spinner.info(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working, ${configured}/${results.length} configured`));
-                 }
-                 else if (!argv.quiet) {
-                     console.log(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working, ${configured}/${results.length} configured`));
-                 }
-                 if (argv.verbose && !argv.quiet) {
-                     console.log(chalk.blue("\n📋 Detailed Results:"));
-                     console.log(JSON.stringify(results, null, 2));
-                 }
-             })
-             .demandCommand(1, "")
-             .example("$0 provider status", "Check all providers");
-     })
-     // Status command alias
-     .command("status", "Check AI provider connectivity and performance (alias for provider status)", (yargsConfig) => yargsConfig
-         .usage("Usage: $0 status [options]")
-         .option("verbose", {
-             type: "boolean",
-             alias: "v",
-             description: "Show detailed information",
-         })
-         .option("quiet", {
-             type: "boolean",
-             alias: "q",
-             description: "Suppress non-essential output",
-         })
-         .example("$0 status", "Quick provider status check")
-         .example("$0 status --verbose", "Show detailed status information"), async (argv) => {
-         // Direct implementation instead of redirect to avoid recursion
-         const { CLICommandFactory } = await import("./factories/commandFactory.js");
-         const commandFactory = new CLICommandFactory();
-         await commandFactory.executeProviderStatus(argv);
-     })
-     // Configuration Command Group
-     .command("config <subcommand>", "Manage NeuroLink configuration", (yargsConfig) => {
-         yargsConfig
-             .usage("Usage: $0 config <subcommand> [options]")
-             .command("export", "Export current configuration", (y) => y
-                 .usage("Usage: $0 config export [options]")
-                 .option("output", {
-                     type: "string",
-                     alias: "o",
-                     description: "Output file for configuration",
-                 })
-                 .example("$0 config export", "Export to stdout")
-                 .example("$0 config export -o config.json", "Export to file"), async (argv) => {
-                 try {
-                     const config = {
-                         providers: {
-                             openai: !!process.env.OPENAI_API_KEY,
-                             bedrock: !!(process.env.AWS_ACCESS_KEY_ID &&
-                                 process.env.AWS_SECRET_ACCESS_KEY),
-                             vertex: !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
-                                 process.env.GOOGLE_SERVICE_ACCOUNT_KEY ||
-                                 (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
-                                     process.env.GOOGLE_AUTH_PRIVATE_KEY)),
-                             anthropic: !!process.env.ANTHROPIC_API_KEY,
-                             azure: !!(process.env.AZURE_OPENAI_API_KEY &&
-                                 process.env.AZURE_OPENAI_ENDPOINT),
-                             "google-ai": !!process.env.GOOGLE_AI_API_KEY,
-                         },
-                         defaults: {
-                             temperature: 0.7,
-                             maxTokens: 500,
-                         },
-                         timestamp: new Date().toISOString(),
-                     };
-                     const output = JSON.stringify(config, null, 2);
-                     if (argv.output) {
-                         fs.writeFileSync(argv.output, output);
-                         if (!argv.quiet) {
-                             console.log(chalk.green(`✅ Configuration exported to ${argv.output}`));
-                         }
-                     }
-                     else {
-                         process.stdout.write(output + "\n");
-                     }
-                 }
-                 catch (error) {
-                     handleError(error, "Configuration export");
-                 }
-             })
-             .demandCommand(1, "")
-             .example("$0 config export", "Export configuration");
-     })
-     // Get Best Provider Command
-     .command("get-best-provider", "Show the best available AI provider", (yargsInstance) => yargsInstance
-         .usage("Usage: $0 get-best-provider [options]")
-         .option("format", {
-             choices: ["text", "json"],
-             default: "text",
-             description: "Output format",
-         })
-         .example("$0 get-best-provider", "Show best provider")
-         .example("$0 get-best-provider --format json", "Show in JSON format"), async (argv) => {
-         try {
-             const { getBestProvider } = await import("../lib/utils/providerUtils.js");
-             const bestProvider = await getBestProvider();
-             if (argv.format === "json") {
-                 process.stdout.write(JSON.stringify({ provider: bestProvider }, null, 2) + "\n");
-             }
-             else {
-                 if (!argv.quiet) {
-                     console.log(chalk.green(`🎯 Best available provider: ${bestProvider}`));
-                 }
-                 else {
-                     process.stdout.write(bestProvider + "\n");
-                 }
-             }
-         }
-         catch (error) {
-             handleError(error, "Provider selection");
-         }
-     })
-     // Completion Command
-     .command("completion", "Generate shell completion script", (yargsInstance) => yargsInstance
-         .usage("Usage: $0 completion")
-         .example("$0 completion >> ~/.bashrc", "Add to bash")
-         .example("$0 completion >> ~/.zshrc", "Add to zsh"), async (argv) => {
-         cli.showCompletionScript();
-     });
- // Add NEW Generate Command (Primary)
- // Removed CLICommandFactory call - commands are handled directly above.createGenerateCommand());
- // MCP Commands: Integrated within base provider functionality
+     // Generate Command (Primary) - Using CLICommandFactory
+     .command(CLICommandFactory.createGenerateCommand())
+     // Stream Text Command - Using CLICommandFactory
+     .command(CLICommandFactory.createStreamCommand())
+     // Batch Processing Command - Using CLICommandFactory
+     .command(CLICommandFactory.createBatchCommand())
+     // Provider Command Group - Using CLICommandFactory
+     .command(CLICommandFactory.createProviderCommands())
+     // Status command alias - Using CLICommandFactory
+     .command(CLICommandFactory.createStatusCommand())
+     // Models Command Group - Using CLICommandFactory
+     .command(CLICommandFactory.createModelsCommands())
+     // MCP Command Group - Using CLICommandFactory
+     .command(CLICommandFactory.createMCPCommands())
+     // Discover Command - Using CLICommandFactory
+     .command(CLICommandFactory.createDiscoverCommand())
+     // Configuration Command Group - Using CLICommandFactory
+     .command(CLICommandFactory.createConfigCommands())
+     // Get Best Provider Command - Using CLICommandFactory
+     .command(CLICommandFactory.createBestProviderCommand())
+     // Completion Command - Using CLICommandFactory
+     .command(CLICommandFactory.createCompletionCommand());
  // Add Ollama Commands
  addOllamaCommands(cli);
  // Execute CLI
  (async () => {
      try {
-         // Initialize CLI with manual MCP support
-         await initializeCLI();
          // Parse and execute commands
          await cli.parse();
      }
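
The refactor above swaps roughly 900 lines of inline yargs definitions for modules returned by CLICommandFactory, which lives in dist/cli/factories/commandFactory.js (+840 lines in this release) and is not shown in this hunk. yargs' .command() accepts a command module of the shape { command, describe, builder, handler }, so each create*Command() presumably returns one. A hypothetical sketch, reusing the command and describe strings from the removed inline definition:

// Hypothetical shape only - the real CLICommandFactory is in
// dist/cli/factories/commandFactory.js and is not part of this hunk.
export class CLICommandFactory {
    static createGenerateCommand() {
        // A yargs command module: { command, describe, builder, handler }.
        return {
            command: ["generate [prompt]", "gen [prompt]"], // strings from the removed inline .command() call
            describe: "Generate content using AI providers",
            builder: (y) => y
                .positional("prompt", { type: "string" })
                .option("provider", { type: "string", default: "auto" }),
            handler: async (argv) => {
                // ...the generate flow previously inlined in dist/cli/index.js...
            },
        };
    }
}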