@juspay/neurolink 6.2.1 → 7.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -16
- package/README.md +3 -3
- package/dist/cli/factories/commandFactory.d.ts +70 -0
- package/dist/cli/factories/commandFactory.js +633 -0
- package/dist/cli/index.d.ts +6 -0
- package/dist/cli/index.js +18 -990
- package/dist/cli/utils/{complete-setup.d.ts → completeSetup.d.ts} +1 -1
- package/dist/cli/utils/{complete-setup.js → completeSetup.js} +2 -2
- package/dist/{lib/core/base-provider.d.ts → core/baseProvider.d.ts} +1 -1
- package/dist/core/{base-provider.js → baseProvider.js} +2 -2
- package/dist/core/{dynamic-models.js → dynamicModels.js} +1 -1
- package/dist/core/evaluation.js +1 -1
- package/dist/core/factory.js +7 -5
- package/dist/core/types.d.ts +2 -2
- package/dist/{lib/factories/provider-registry.js → factories/providerRegistry.js} +11 -9
- package/dist/index.d.ts +4 -4
- package/dist/index.js +1 -1
- package/dist/{core/base-provider.d.ts → lib/core/baseProvider.d.ts} +1 -1
- package/dist/lib/core/{base-provider.js → baseProvider.js} +2 -2
- package/dist/lib/core/{dynamic-models.js → dynamicModels.js} +1 -1
- package/dist/lib/core/evaluation.js +1 -1
- package/dist/lib/core/factory.js +6 -3
- package/dist/lib/core/types.d.ts +2 -2
- package/dist/{factories/provider-registry.js → lib/factories/providerRegistry.js} +11 -9
- package/dist/lib/index.d.ts +4 -4
- package/dist/lib/index.js +1 -1
- package/dist/lib/mcp/factory.d.ts +2 -2
- package/dist/lib/mcp/factory.js +2 -2
- package/dist/lib/mcp/servers/agent/{direct-tools-server.js → directToolsServer.js} +1 -1
- package/dist/lib/mcp/servers/{ai-providers/ai-core-server.js → aiProviders/aiCoreServer.js} +1 -1
- package/dist/lib/mcp/{tool-registry.d.ts → toolRegistry.d.ts} +5 -0
- package/dist/lib/mcp/{tool-registry.js → toolRegistry.js} +60 -0
- package/dist/lib/neurolink.d.ts +4 -4
- package/dist/lib/neurolink.js +13 -8
- package/dist/lib/providers/{amazon-bedrock.d.ts → amazonBedrock.d.ts} +2 -2
- package/dist/lib/providers/{amazon-bedrock.js → amazonBedrock.js} +1 -1
- package/dist/lib/providers/anthropic.d.ts +2 -2
- package/dist/lib/providers/anthropic.js +1 -1
- package/dist/{providers/anthropic-baseprovider.d.ts → lib/providers/anthropicBaseProvider.d.ts} +2 -2
- package/dist/lib/providers/{anthropic-baseprovider.js → anthropicBaseProvider.js} +1 -1
- package/dist/{providers/azure-openai.d.ts → lib/providers/azureOpenai.d.ts} +2 -2
- package/dist/{providers/azure-openai.js → lib/providers/azureOpenai.js} +1 -1
- package/dist/{providers/google-ai-studio.d.ts → lib/providers/googleAiStudio.d.ts} +2 -2
- package/dist/lib/providers/{google-ai-studio.js → googleAiStudio.js} +1 -1
- package/dist/{providers/google-vertex.d.ts → lib/providers/googleVertex.d.ts} +2 -2
- package/dist/lib/providers/{google-vertex.js → googleVertex.js} +1 -1
- package/dist/lib/providers/huggingFace.d.ts +2 -2
- package/dist/lib/providers/huggingFace.js +1 -1
- package/dist/lib/providers/index.d.ts +4 -4
- package/dist/lib/providers/index.js +4 -4
- package/dist/lib/providers/mistral.d.ts +2 -2
- package/dist/lib/providers/mistral.js +1 -1
- package/dist/lib/providers/ollama.d.ts +2 -2
- package/dist/lib/providers/ollama.js +1 -1
- package/dist/lib/providers/openAI.d.ts +2 -2
- package/dist/lib/providers/openAI.js +1 -1
- package/dist/{sdk/tool-registration.d.ts → lib/sdk/toolRegistration.d.ts} +1 -1
- package/dist/lib/sdk/{tool-registration.js → toolRegistration.js} +17 -0
- package/dist/lib/telemetry/index.d.ts +2 -2
- package/dist/lib/telemetry/index.js +3 -3
- package/dist/lib/utils/logger.js +5 -6
- package/dist/lib/utils/providerConfig.js +1 -1
- package/dist/lib/utils/{provider-setup-messages.js → providerSetupMessages.js} +1 -1
- package/dist/lib/utils/providerUtils.js +10 -3
- package/dist/mcp/factory.d.ts +2 -2
- package/dist/mcp/factory.js +2 -2
- package/dist/mcp/servers/agent/{direct-tools-server.js → directToolsServer.js} +1 -1
- package/dist/mcp/servers/{ai-providers/ai-core-server.js → aiProviders/aiCoreServer.js} +2 -2
- package/dist/mcp/{tool-registry.d.ts → toolRegistry.d.ts} +5 -0
- package/dist/mcp/{tool-registry.js → toolRegistry.js} +60 -0
- package/dist/neurolink.d.ts +5 -5
- package/dist/neurolink.js +13 -8
- package/dist/providers/{amazon-bedrock.d.ts → amazonBedrock.d.ts} +2 -2
- package/dist/providers/{amazon-bedrock.js → amazonBedrock.js} +1 -1
- package/dist/providers/anthropic.d.ts +2 -2
- package/dist/providers/anthropic.js +1 -1
- package/dist/{lib/providers/anthropic-baseprovider.d.ts → providers/anthropicBaseProvider.d.ts} +2 -2
- package/dist/providers/{anthropic-baseprovider.js → anthropicBaseProvider.js} +1 -1
- package/dist/{lib/providers/azure-openai.d.ts → providers/azureOpenai.d.ts} +2 -2
- package/dist/{lib/providers/azure-openai.js → providers/azureOpenai.js} +1 -1
- package/dist/{lib/providers/google-ai-studio.d.ts → providers/googleAiStudio.d.ts} +2 -2
- package/dist/providers/{google-ai-studio.js → googleAiStudio.js} +2 -2
- package/dist/{lib/providers/google-vertex.d.ts → providers/googleVertex.d.ts} +2 -2
- package/dist/providers/{google-vertex.js → googleVertex.js} +1 -1
- package/dist/providers/huggingFace.d.ts +2 -2
- package/dist/providers/huggingFace.js +1 -1
- package/dist/providers/index.d.ts +4 -4
- package/dist/providers/index.js +4 -4
- package/dist/providers/mistral.d.ts +2 -2
- package/dist/providers/mistral.js +1 -1
- package/dist/providers/ollama.d.ts +2 -2
- package/dist/providers/ollama.js +1 -1
- package/dist/providers/openAI.d.ts +2 -2
- package/dist/providers/openAI.js +1 -1
- package/dist/{lib/sdk/tool-registration.d.ts → sdk/toolRegistration.d.ts} +1 -1
- package/dist/sdk/{tool-registration.js → toolRegistration.js} +17 -0
- package/dist/telemetry/index.d.ts +2 -2
- package/dist/telemetry/index.js +3 -3
- package/dist/utils/logger.js +5 -6
- package/dist/utils/providerConfig.js +1 -1
- package/dist/utils/{provider-setup-messages.js → providerSetupMessages.js} +1 -1
- package/dist/utils/providerUtils.js +10 -3
- package/package.json +23 -22
- package/dist/cli/factories/command-factory.d.ts +0 -19
- package/dist/cli/factories/command-factory.js +0 -184
- /package/dist/agent/{direct-tools.d.ts → directTools.d.ts} +0 -0
- /package/dist/agent/{direct-tools.js → directTools.js} +0 -0
- /package/dist/cli/utils/{env-manager.d.ts → envManager.d.ts} +0 -0
- /package/dist/cli/utils/{env-manager.js → envManager.js} +0 -0
- /package/dist/cli/utils/{interactive-setup.d.ts → interactiveSetup.d.ts} +0 -0
- /package/dist/cli/utils/{interactive-setup.js → interactiveSetup.js} +0 -0
- /package/dist/core/{dynamic-models.d.ts → dynamicModels.d.ts} +0 -0
- /package/dist/core/{evaluation-providers.d.ts → evaluationProviders.d.ts} +0 -0
- /package/dist/core/{evaluation-providers.js → evaluationProviders.js} +0 -0
- /package/dist/core/{service-registry.d.ts → serviceRegistry.d.ts} +0 -0
- /package/dist/core/{service-registry.js → serviceRegistry.js} +0 -0
- /package/dist/factories/{provider-factory.d.ts → providerFactory.d.ts} +0 -0
- /package/dist/factories/{provider-factory.js → providerFactory.js} +0 -0
- /package/dist/factories/{provider-registry.d.ts → providerRegistry.d.ts} +0 -0
- /package/dist/lib/agent/{direct-tools.d.ts → directTools.d.ts} +0 -0
- /package/dist/lib/agent/{direct-tools.js → directTools.js} +0 -0
- /package/dist/lib/core/{dynamic-models.d.ts → dynamicModels.d.ts} +0 -0
- /package/dist/lib/core/{evaluation-providers.d.ts → evaluationProviders.d.ts} +0 -0
- /package/dist/lib/core/{evaluation-providers.js → evaluationProviders.js} +0 -0
- /package/dist/lib/core/{service-registry.d.ts → serviceRegistry.d.ts} +0 -0
- /package/dist/lib/core/{service-registry.js → serviceRegistry.js} +0 -0
- /package/dist/lib/factories/{provider-factory.d.ts → providerFactory.d.ts} +0 -0
- /package/dist/lib/factories/{provider-factory.js → providerFactory.js} +0 -0
- /package/dist/lib/factories/{provider-registry.d.ts → providerRegistry.d.ts} +0 -0
- /package/dist/lib/mcp/servers/agent/{direct-tools-server.d.ts → directToolsServer.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-analysis-tools.d.ts → aiProviders/aiAnalysisTools.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-analysis-tools.js → aiProviders/aiAnalysisTools.js} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-core-server.d.ts → aiProviders/aiCoreServer.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-workflow-tools.d.ts → aiProviders/aiWorkflowTools.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-workflow-tools.js → aiProviders/aiWorkflowTools.js} +0 -0
- /package/dist/lib/mcp/servers/utilities/{utility-server.d.ts → utilityServer.d.ts} +0 -0
- /package/dist/lib/mcp/servers/utilities/{utility-server.js → utilityServer.js} +0 -0
- /package/dist/lib/proxy/{proxy-fetch.d.ts → proxyFetch.d.ts} +0 -0
- /package/dist/lib/proxy/{proxy-fetch.js → proxyFetch.js} +0 -0
- /package/dist/lib/telemetry/{telemetry-service.d.ts → telemetryService.d.ts} +0 -0
- /package/dist/lib/telemetry/{telemetry-service.js → telemetryService.js} +0 -0
- /package/dist/lib/types/{generate-types.d.ts → generateTypes.d.ts} +0 -0
- /package/dist/lib/types/{generate-types.js → generateTypes.js} +0 -0
- /package/dist/lib/types/{mcp-types.d.ts → mcpTypes.d.ts} +0 -0
- /package/dist/lib/types/{mcp-types.js → mcpTypes.js} +0 -0
- /package/dist/lib/types/{stream-types.d.ts → streamTypes.d.ts} +0 -0
- /package/dist/lib/types/{stream-types.js → streamTypes.js} +0 -0
- /package/dist/lib/types/{universal-provider-options.d.ts → universalProviderOptions.d.ts} +0 -0
- /package/dist/lib/types/{universal-provider-options.js → universalProviderOptions.js} +0 -0
- /package/dist/lib/utils/{provider-setup-messages.d.ts → providerSetupMessages.d.ts} +0 -0
- /package/dist/mcp/servers/agent/{direct-tools-server.d.ts → directToolsServer.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-analysis-tools.d.ts → aiProviders/aiAnalysisTools.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-analysis-tools.js → aiProviders/aiAnalysisTools.js} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-core-server.d.ts → aiProviders/aiCoreServer.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-workflow-tools.d.ts → aiProviders/aiWorkflowTools.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-workflow-tools.js → aiProviders/aiWorkflowTools.js} +0 -0
- /package/dist/mcp/servers/utilities/{utility-server.d.ts → utilityServer.d.ts} +0 -0
- /package/dist/mcp/servers/utilities/{utility-server.js → utilityServer.js} +0 -0
- /package/dist/proxy/{proxy-fetch.d.ts → proxyFetch.d.ts} +0 -0
- /package/dist/proxy/{proxy-fetch.js → proxyFetch.js} +0 -0
- /package/dist/telemetry/{telemetry-service.d.ts → telemetryService.d.ts} +0 -0
- /package/dist/telemetry/{telemetry-service.js → telemetryService.js} +0 -0
- /package/dist/types/{generate-types.d.ts → generateTypes.d.ts} +0 -0
- /package/dist/types/{generate-types.js → generateTypes.js} +0 -0
- /package/dist/types/{mcp-types.d.ts → mcpTypes.d.ts} +0 -0
- /package/dist/types/{mcp-types.js → mcpTypes.js} +0 -0
- /package/dist/types/{stream-types.d.ts → streamTypes.d.ts} +0 -0
- /package/dist/types/{stream-types.js → streamTypes.js} +0 -0
- /package/dist/types/{universal-provider-options.d.ts → universalProviderOptions.d.ts} +0 -0
- /package/dist/types/{universal-provider-options.js → universalProviderOptions.js} +0 -0
- /package/dist/utils/{provider-setup-messages.d.ts → providerSetupMessages.d.ts} +0 -0
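
Most of this release is a mechanical kebab-case → camelCase rename across dist/ (with several modules also moving between dist/ and dist/lib/). Consumers that deep-import compiled files would need to update those paths; a hedged before/after sketch (these deep-import specifiers are hypothetical illustrations, not documented entry points, though ProviderRegistry itself is imported this way in the old CLI code below):

    // 6.2.1 — kebab-case module path (hypothetical deep import):
    import { ProviderRegistry } from "@juspay/neurolink/dist/lib/factories/provider-registry.js";
    // 7.1.0 — the same module after the camelCase rename:
    import { ProviderRegistry } from "@juspay/neurolink/dist/lib/factories/providerRegistry.js";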
package/dist/cli/index.js
CHANGED
@@ -1,101 +1,15 @@
 #!/usr/bin/env node
-// CRITICAL: Set MCP logging level before ANY imports
-if (!process.argv.includes("--debug")) {
-    process.env.MCP_LOG_LEVEL = "error"; // Only show MCP errors unless debugging
-}
-else {
-    process.env.MCP_LOG_LEVEL = "info"; // Show MCP logs when debugging
-}
 /**
- * NeuroLink CLI
+ * NeuroLink CLI
  *
  * Professional CLI experience with minimal maintenance overhead.
  * Features: Spinners, colors, batch processing, provider testing, rich help
- * Implementation: ~300 lines using simple JS utility functions
  */
-import { NeuroLink } from "../lib/neurolink.js";
 import yargs from "yargs";
 import { hideBin } from "yargs/helpers";
-import ora from "ora";
 import chalk from "chalk";
-import fs from "fs";
 import { addOllamaCommands } from "./commands/ollama.js";
-import {
-function displayDebugInfo(title, data, debug) {
-    if (debug) {
-        console.log(chalk.blue(title));
-        console.log(JSON.stringify(data, null, 2));
-        console.log();
-    }
-}
-function displayMissingDataWarning(type) {
-    console.log();
-    console.log(chalk.red(`⚠️ ${type} enabled but no data received`));
-    console.log();
-}
-function formatAnalytics(analytics) {
-    console.log();
-    console.log(chalk.blue("📊 Analytics:"));
-    console.log(` 🚀 Provider: ${analytics.provider}`);
-    console.log(` 🤖 Model: ${analytics.model}`);
-    if (analytics.tokens) {
-        const tokens = analytics.tokens;
-        console.log(` 💬 Tokens: ${tokens.totalTokens || tokens.total || "unknown"}`);
-    }
-    console.log(` ⏱️ Response Time: ${analytics.responseTime}ms`);
-    if (analytics.context) {
-        const context = analytics.context;
-        console.log(` 📋 Context: ${Object.keys(context).length} fields`);
-    }
-    console.log();
-}
-function formatEvaluation(evaluation) {
-    console.log();
-    console.log(chalk.blue("⭐ Response Quality Evaluation:"));
-    console.log(` 📊 Scores: Relevance ${evaluation.relevanceScore || evaluation.relevance}/10, Accuracy ${evaluation.accuracyScore || evaluation.accuracy}/10, Completeness ${evaluation.completenessScore || evaluation.completeness}/10`);
-    console.log(` 🎯 Overall Quality: ${evaluation.overall}/10`);
-    const severity = evaluation.alertSeverity || "none";
-    const severityColors = {
-        high: chalk.red,
-        medium: chalk.yellow,
-        low: chalk.blue,
-        none: chalk.green,
-    };
-    const severityColor = severityColors[severity] || chalk.gray;
-    console.log(` 🚨 Alert Level: ${severityColor(severity)}`);
-    if (evaluation.reasoning) {
-        console.log(` 💭 Analysis: ${evaluation.reasoning}`);
-    }
-    if (evaluation.suggestedImprovements) {
-        console.log(` 💡 Improvements: ${evaluation.suggestedImprovements}`);
-    }
-    const evalModel = evaluation.evaluationModel || "unknown";
-    const evalTime = evaluation.evaluationTime
-        ? `${evaluation.evaluationTime}ms`
-        : "unknown";
-    console.log(` 🤖 Evaluated by: ${evalModel} (${evalTime})`);
-    console.log();
-}
-function displayAnalyticsAndEvaluation(result, argv) {
-    if (result && result.analytics) {
-        displayDebugInfo("📊 Analytics:", result.analytics, argv.debug);
-        if (!argv.debug) {
-            formatAnalytics(result.analytics);
-        }
-    }
-    else if (argv.enableAnalytics) {
-        displayMissingDataWarning("Analytics");
-    }
-    if (result && result.evaluation) {
-        displayDebugInfo("⭐ Response Evaluation:", result.evaluation, argv.debug);
-        if (!argv.debug) {
-            formatEvaluation(result.evaluation);
-        }
-    }
-    else if (argv.enableEvaluation) {
-        displayMissingDataWarning("Evaluation");
-    }
-}
+import { CLICommandFactory } from "./factories/commandFactory.js";
 // Load environment variables from .env file
 try {
     // Try to import and configure dotenv
@@ -188,18 +102,6 @@ function handleError(error, context) {
     }
     process.exit(1);
 }
-// Initialize MCP system for CLI with manual config enabled
-async function initializeCLI() {
-    // Import and configure for CLI mode
-    const { ProviderRegistry } = await import("../lib/factories/provider-registry.js");
-    // Enable manual MCP only for CLI
-    ProviderRegistry.setOptions({
-        enableManualMCP: true,
-    });
-    logger.debug("CLI initialized with manual MCP support enabled");
-}
-// Initialize SDK
-const sdk = new NeuroLink();
 // Manual pre-validation for unknown flags
 const args = hideBin(process.argv);
 // Enhanced CLI with Professional UX
@@ -219,12 +121,10 @@ const cli = yargs(args)
     // Control SDK logging based on debug flag
     if (argv.debug) {
         process.env.NEUROLINK_DEBUG = "true";
-        process.env.MCP_LOG_LEVEL = "info"; // Show MCP logs in debug mode
     }
     else {
         // Always set to false when debug is not enabled (including when not provided)
         process.env.NEUROLINK_DEBUG = "false";
-        process.env.MCP_LOG_LEVEL = "error"; // Hide MCP info logs when not debugging
     }
     // Keep existing quiet middleware
     if (process.env.NEUROLINK_QUIET === "true" &&
@@ -295,899 +195,27 @@ const cli = yargs(args)
     }
     exitProcess(); // Default exit
 })
-    // Generate Command (Primary)
-    .command(
-
-    .
-
-
-
-    .
-
-
-
-
-
-
-
-
-            "google-ai",
-            "huggingface",
-            "ollama",
-            "mistral",
-        ],
-        default: "auto",
-        description: "AI provider to use (auto-selects best available)",
-    })
-    .option("temperature", {
-        type: "number",
-        default: 0.7,
-        description: "Creativity level (0.0 = focused, 1.0 = creative)",
-    })
-    .option("max-tokens", {
-        type: "number",
-        default: 1000,
-        description: "Maximum tokens to generate",
-    })
-    .option("system", {
-        type: "string",
-        description: "System prompt to guide AI behavior",
-    })
-    .option("format", {
-        choices: ["text", "json"],
-        default: "text",
-        alias: "f",
-        description: "Output format",
-    })
-    .option("debug", {
-        type: "boolean",
-        default: false,
-        description: "Enable debug mode with verbose output",
-    }) // Kept for potential specific debug logic
-    .option("model", {
-        type: "string",
-        description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
-    })
-    .option("timeout", {
-        type: "number",
-        default: 120,
-        description: "Maximum execution time in seconds (default: 120)",
-    })
-    .option("disable-tools", {
-        type: "boolean",
-        default: false,
-        description: "Disable MCP tool integration (tools enabled by default)",
-    })
-    .option("enable-analytics", {
-        type: "boolean",
-        default: false,
-        description: "Enable usage analytics collection",
-    })
-    .option("enable-evaluation", {
-        type: "boolean",
-        default: false,
-        description: "Enable AI response quality evaluation",
-    })
-    .option("evaluation-domain", {
-        type: "string",
-        description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
-    })
-    .option("tool-usage-context", {
-        type: "string",
-        description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
-    })
-    .option("lighthouse-style", {
-        type: "boolean",
-        default: false,
-        description: "Use Lighthouse-compatible domain-aware evaluation",
-    })
-    .option("context", {
-        type: "string",
-        description: "JSON context object for custom data",
-    })
-    .example('$0 generate "Hello world"', "Basic content generation")
-    .example('$0 generate "Write a story" --provider openai', "Use specific provider")
-    .example('$0 generate "What time is it?"', "Use with natural tool integration (default)")
-    .example('$0 generate "Hello world" --disable-tools', "Use without tool integration"), async (argv) => {
-        // SOLUTION 1: Handle stdin input if no prompt provided
-        if (!argv.prompt && !process.stdin.isTTY) {
-            // Read from stdin
-            let stdinData = "";
-            process.stdin.setEncoding("utf8");
-            for await (const chunk of process.stdin) {
-                stdinData += chunk;
-            }
-            argv.prompt = stdinData.trim();
-            if (!argv.prompt) {
-                throw new Error("No input received from stdin");
-            }
-        }
-        else if (!argv.prompt) {
-            throw new Error('Prompt required. Use: neurolink generate "your prompt" or echo "prompt" | neurolink generate');
-        }
-        // SOLUTION 2: Parameter validation
-        const errors = [];
-        // Validate max-tokens
-        if (argv.maxTokens !== undefined) {
-            if (!Number.isInteger(argv.maxTokens) || argv.maxTokens < 1) {
-                errors.push(`max-tokens must be a positive integer >= 1, got: ${argv.maxTokens}`);
-            }
-            if (argv.maxTokens > 100000) {
-                errors.push(`max-tokens too large (>100000), got: ${argv.maxTokens}`);
-            }
-        }
-        // Validate temperature
-        if (argv.temperature !== undefined) {
-            if (typeof argv.temperature !== "number" ||
-                argv.temperature < 0 ||
-                argv.temperature > 1) {
-                errors.push(`temperature must be between 0.0 and 1.0, got: ${argv.temperature}`);
-            }
-        }
-        // Validate timeout
-        if (argv.timeout !== undefined) {
-            if (!Number.isInteger(argv.timeout) || argv.timeout < 1) {
-                errors.push(`timeout must be a positive integer >= 1 second, got: ${argv.timeout}`);
-            }
-            if (argv.timeout > 600) {
-                errors.push(`timeout too large (>600s), got: ${argv.timeout}s`);
-            }
-        }
-        if (errors.length > 0) {
-            throw new Error(`Parameter validation failed:\n${errors.map((e) => ` • ${e}`).join("\n")}\n\nUse --help for valid parameter ranges.`);
-        }
-        // Command is now the primary generate method
-        let originalConsole = {};
-        if ((argv.format === "json" || argv.outputFormat === "json") &&
-            !argv.quiet) {
-            // Suppress only if not quiet, as quiet implies no spinners anyway
-            originalConsole = { ...console };
-            Object.keys(originalConsole).forEach((key) => {
-                if (typeof console[key] === "function") {
-                    console[key] = () => { };
-                }
-            });
-        }
-        const spinner = argv.outputFormat === "json" || argv.format === "json" || argv.quiet
-            ? null
-            : ora("🤖 Generating text...").start();
-        try {
-            // CRITICAL: Add master timeout to prevent infinite hangs
-            const cliTimeout = argv.timeout ? argv.timeout * 1000 : 120000; // Default 2 minutes
-            const timeoutPromise = new Promise((_, reject) => {
-                setTimeout(() => {
-                    reject(new Error(`CLI operation timed out after ${cliTimeout / 1000} seconds. Use --timeout to adjust.`));
-                }, cliTimeout);
-            });
-            // Parse context if provided
-            let contextObj;
-            if (argv.context) {
-                try {
-                    contextObj = JSON.parse(argv.context);
-                }
-                catch {
-                    throw new Error("Invalid JSON provided for --context option");
-                }
-            }
-            // Use standard SDK for all generation - tools are now built into BaseProvider
-            const generatePromise = sdk.generate({
-                input: { text: argv.prompt },
-                provider: argv.provider === "auto"
-                    ? undefined
-                    : argv.provider,
-                model: argv.model,
-                temperature: argv.temperature,
-                maxTokens: argv.maxTokens,
-                systemPrompt: argv.system,
-                timeout: argv.timeout,
-                disableTools: argv.disableTools === true, // Tools are enabled by default
-                // NEW: Analytics and evaluation support
-                enableAnalytics: argv.enableAnalytics,
-                enableEvaluation: argv.enableEvaluation,
-                context: contextObj,
-                // NEW: Lighthouse-compatible domain-aware evaluation
-                evaluationDomain: argv.evaluationDomain,
-                toolUsageContext: argv.toolUsageContext,
-            });
-            // Wrap generation with master timeout to prevent infinite hangs
-            const result = await Promise.race([generatePromise, timeoutPromise]);
-            if (argv.format === "json" && originalConsole.log) {
-                Object.assign(console, originalConsole);
-            }
-            if (spinner) {
-                spinner.succeed(chalk.green("✅ Text generated successfully!"));
-            }
-            const typedResult = result;
-            const responseText = typedResult?.text || typedResult?.content || "";
-            const responseUsage = typedResult?.usage || {
-                promptTokens: 0,
-                completionTokens: 0,
-                totalTokens: 0,
-            };
-            if (argv.format === "json" || argv.outputFormat === "json") {
-                // CLI debug removed - analytics and evaluation now working correctly
-                const jsonOutput = {
-                    content: responseText,
-                    provider: typedResult?.provider || argv.provider,
-                    usage: responseUsage,
-                    responseTime: typedResult?.responseTime || 0,
-                    toolCalls: typedResult?.toolCalls || [],
-                    toolResults: typedResult?.toolResults || [],
-                };
-                // Include analytics if present
-                if (typedResult?.analytics) {
-                    jsonOutput.analytics = typedResult.analytics;
-                }
-                // Include evaluation if present
-                if (typedResult?.evaluation) {
-                    jsonOutput.evaluation = typedResult.evaluation;
-                }
-                process.stdout.write(JSON.stringify(jsonOutput, null, 2) + "\n");
-            }
-            else if (argv.debug) {
-                // Debug mode: Show AI response + full metadata
-                if (responseText) {
-                    console.log("\n" + responseText + "\n");
-                }
-                // Show tool calls if any
-                const typedResultForTools = result;
-                if (typedResultForTools &&
-                    typedResultForTools.toolCalls &&
-                    typedResultForTools.toolCalls.length > 0) {
-                    console.log(chalk.blue("🔧 Tools Called:"));
-                    for (const toolCall of typedResultForTools.toolCalls) {
-                        const toolCallObj = toolCall;
-                        console.log(`- ${toolCallObj.toolName}`);
-                        console.log(` Args: ${JSON.stringify(toolCallObj.args)}`);
-                    }
-                    console.log();
-                }
-                // Show tool results if any
-                if (typedResultForTools &&
-                    typedResultForTools.toolResults &&
-                    typedResultForTools.toolResults.length > 0) {
-                    console.log(chalk.blue("📋 Tool Results:"));
-                    for (const toolResult of typedResultForTools.toolResults) {
-                        const toolResultObj = toolResult;
-                        console.log(`- ${toolResultObj.toolCallId}`);
-                        console.log(` Result: ${JSON.stringify(toolResultObj.result).substring(0, 200)}...`);
-                    }
-                    console.log();
-                }
-                // DEBUG: Show what's in the result object
-                if (argv.debug) {
-                    logger.debug("Result object keys:", {
-                        keys: Object.keys(result || {}),
-                    });
-                    logger.debug("Enhancement status:", {
-                        hasAnalytics: !!(result && result.analytics),
-                        hasEvaluation: !!(result && result.evaluation),
-                        enableAnalytics: argv.enableAnalytics,
-                        enableEvaluation: argv.enableEvaluation,
-                        hasContext: !!contextObj,
-                    });
-                }
-                // Show analytics and evaluation if enabled
-                displayAnalyticsAndEvaluation(result, argv);
-                console.log(JSON.stringify({
-                    provider: result
-                        ? result.provider || argv.provider
-                        : argv.provider,
-                    usage: responseUsage,
-                    responseTime: result
-                        ? result.responseTime || 0
-                        : 0,
-                }, null, 2));
-                if (responseUsage.totalTokens) {
-                    console.log(chalk.blue(`ℹ️ ${responseUsage.totalTokens} tokens used`));
-                }
-            }
-            else {
-                // Default mode: Clean AI response only
-                if (responseText) {
-                    console.log(responseText);
-                }
-                // Show analytics and evaluation if enabled
-                displayAnalyticsAndEvaluation(result, argv);
-            }
-            // Explicitly exit to prevent hanging, especially with Google AI Studio
-            process.exit(0);
-        }
-        catch (error) {
-            if (argv.format === "json" && originalConsole.log) {
-                Object.assign(console, originalConsole);
-            }
-            if (spinner) {
-                spinner.fail();
-            }
-            if (argv.format === "json") {
-                process.stdout.write(JSON.stringify({ error: error.message, success: false }, null, 2) + "\n");
-                process.exit(1);
-            }
-            else {
-                handleError(error, "Text generation");
-            }
-        }
-    })
-    // Stream Text Command
-    .command("stream [prompt]", "Stream generation in real-time", (yargsInstance) => yargsInstance
-        .usage("Usage: $0 stream [prompt] [options]")
-        .positional("prompt", {
-            type: "string",
-            description: "Text prompt for streaming (or read from stdin)",
-        })
-        .option("provider", {
-            choices: [
-                "auto",
-                "openai",
-                "bedrock",
-                "vertex",
-                "google-vertex",
-                "anthropic",
-                "azure",
-                "google-ai",
-                "huggingface",
-                "ollama",
-                "mistral",
-            ],
-            default: "auto",
-            description: "AI provider to use",
-        })
-        .option("temperature", {
-            type: "number",
-            default: 0.7,
-            description: "Creativity level",
-        })
-        .option("max-tokens", {
-            type: "number",
-            description: "Maximum number of tokens to generate",
-        })
-        .option("timeout", {
-            type: "string",
-            default: "2m",
-            description: "Timeout for streaming (e.g., 30s, 2m, 1h)",
-        })
-        .option("model", {
-            type: "string",
-            description: "Specific model to use (e.g., gemini-2.5-pro, gemini-2.5-flash)",
-        })
-        .option("debug", {
-            type: "boolean",
-            default: false,
-            description: "Enable debug mode with interleaved logging",
-        })
-        .option("disable-tools", {
-            type: "boolean",
-            default: false,
-            description: "Disable MCP tool integration (tools enabled by default)",
-        })
-        .option("enable-analytics", {
-            type: "boolean",
-            default: false,
-            description: "Enable usage analytics collection",
-        })
-        .option("enable-evaluation", {
-            type: "boolean",
-            default: false,
-            description: "Enable AI response quality evaluation",
-        })
-        .option("evaluation-domain", {
-            type: "string",
-            description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
-        })
-        .option("tool-usage-context", {
-            type: "string",
-            description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
-        })
-        .option("lighthouse-style", {
-            type: "boolean",
-            default: false,
-            description: "Use Lighthouse-compatible domain-aware evaluation",
-        })
-        .option("context", {
-            type: "string",
-            description: "JSON context object for custom data",
-        })
-        .example('$0 stream "Tell me a story"', "Stream a story in real-time")
-        .example('$0 stream "What time is it?"', "Stream with natural tool integration (default)")
-        .example('$0 stream "Tell me a story" --disable-tools', "Stream without tool integration"), async (argv) => {
-        // SOLUTION 1: Handle stdin input if no prompt provided
-        if (!argv.prompt && !process.stdin.isTTY) {
-            // Read from stdin
-            let stdinData = "";
-            process.stdin.setEncoding("utf8");
-            for await (const chunk of process.stdin) {
-                stdinData += chunk;
-            }
-            argv.prompt = stdinData.trim();
-            if (!argv.prompt) {
-                throw new Error("No input received from stdin");
-            }
-        }
-        else if (!argv.prompt) {
-            throw new Error('Prompt required. Use: neurolink stream "your prompt" or echo "prompt" | neurolink stream');
-        }
-        // Default mode: Simple streaming message
-        // Debug mode: More detailed information
-        if (!argv.quiet && !argv.debug) {
-            console.log(chalk.blue("🔄 Streaming..."));
-        }
-        else if (!argv.quiet && argv.debug) {
-            console.log(chalk.blue(`🔄 Streaming from ${argv.provider} provider with debug logging...\n`));
-        }
-        try {
-            // Parse context if provided
-            let contextObj;
-            if (argv.context) {
-                try {
-                    contextObj = JSON.parse(argv.context);
-                }
-                catch {
-                    throw new Error("Invalid JSON provided for --context option");
-                }
-            }
-            // Use standard SDK streaming - tools are handled automatically
-            const sdk = new NeuroLink();
-            const stream = await sdk.stream({
-                input: { text: argv.prompt },
-                provider: argv.provider === "auto"
-                    ? undefined
-                    : argv.provider,
-                model: argv.model,
-                temperature: argv.temperature,
-                timeout: argv.timeout,
-                disableTools: argv.disableTools === true, // Tools are enabled by default
-                // NEW: Analytics and evaluation support
-                enableAnalytics: argv.enableAnalytics,
-                enableEvaluation: argv.enableEvaluation,
-                context: contextObj,
-            });
-            // Process the stream
-            for await (const chunk of stream.stream) {
-                process.stdout.write(chunk.content);
-            }
-            if (!argv.quiet) {
-                process.stdout.write("\n");
-            }
-            // Clean exit for tools-disabled streaming
-            process.exit(0);
-        }
-        catch (error) {
-            handleError(error, "Text streaming");
-        }
-    })
-    // Batch Processing Command
-    .command("batch <file>", "Process multiple prompts from a file", (yargsInstance) => yargsInstance
-        .usage("Usage: $0 batch <file> [options]")
-        .positional("file", {
-            type: "string",
-            description: "File with prompts (one per line)",
-            demandOption: true,
-        })
-        .option("output", {
-            type: "string",
-            description: "Output file for results (default: stdout)",
-        })
-        .option("delay", {
-            type: "number",
-            default: 1000,
-            description: "Delay between requests in milliseconds",
-        })
-        .option("provider", {
-            choices: [
-                "auto",
-                "openai",
-                "bedrock",
-                "vertex",
-                "google-vertex",
-                "anthropic",
-                "azure",
-                "google-ai",
-                "huggingface",
-                "ollama",
-                "mistral",
-            ],
-            default: "auto",
-            description: "AI provider to use",
-        })
-        .option("timeout", {
-            type: "string",
-            default: "30s",
-            description: "Timeout for each request (e.g., 30s, 2m, 1h)",
-        })
-        .option("temperature", {
-            type: "number",
-            description: "Global temperature for batch jobs",
-        })
-        .option("max-tokens", {
-            type: "number",
-            description: "Global max tokens for batch jobs",
-        })
-        .option("system", {
-            type: "string",
-            description: "Global system prompt for batch jobs",
-        })
-        .option("debug", {
-            type: "boolean",
-            default: false,
-            description: "Enable debug mode with detailed per-item logging",
-        })
-        .example("$0 batch prompts.txt --output results.json", "Process and save to file"), async (argv) => {
-        const spinner = argv.quiet ? null : ora().start();
-        try {
-            if (!fs.existsSync(argv.file)) {
-                throw new Error(`File not found: ${argv.file}`);
-            }
-            const buffer = fs.readFileSync(argv.file);
-            const isLikelyBinary = buffer.includes(0) ||
-                buffer.toString("hex", 0, 100).includes("0000") ||
-                (!buffer.toString("utf8", 0, 1024).includes("\n") &&
-                    buffer.length > 512);
-            if (isLikelyBinary) {
-                throw new Error(`Invalid file format: Binary file detected at "${argv.file}". Batch processing requires a plain text file.`);
-            }
-            const prompts = buffer
-                .toString("utf8")
-                .split("\n")
-                .map((line) => line.trim())
-                .filter(Boolean);
-            if (prompts.length === 0) {
-                throw new Error("No prompts found in file");
-            }
-            if (spinner) {
-                spinner.text = `📦 Processing ${prompts.length} prompts...`;
-            }
-            else if (!argv.quiet) {
-                console.log(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
-            }
-            const results = [];
-            for (let i = 0; i < prompts.length; i++) {
-                if (spinner) {
-                    spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
-                }
-                try {
-                    const result = await sdk.generate({
-                        input: { text: prompts[i] },
-                        provider: argv.provider === "auto"
-                            ? undefined
-                            : argv.provider,
-                        temperature: argv.temperature,
-                        maxTokens: argv.maxTokens,
-                        systemPrompt: argv.system,
-                        timeout: argv.timeout,
-                    });
-                    results.push({ prompt: prompts[i], response: result.content });
-                    if (spinner) {
-                        spinner.render();
-                    } // Update spinner without changing text
-                }
-                catch (error) {
-                    results.push({
-                        prompt: prompts[i],
-                        error: error.message,
-                    });
-                    if (spinner) {
-                        spinner.render();
-                    }
-                }
-                if (argv.delay && i < prompts.length - 1) {
-                    await new Promise((resolve) => setTimeout(resolve, argv.delay));
-                }
-            }
-            if (spinner) {
-                spinner.succeed(chalk.green("✅ Batch processing complete!"));
-            }
-            const outputData = JSON.stringify(results, null, 2);
-            if (argv.output) {
-                fs.writeFileSync(argv.output, outputData);
-                if (!argv.quiet) {
-                    console.log(chalk.green(`\n✅ Results saved to ${argv.output}`));
-                }
-            }
-            else {
-                process.stdout.write(outputData + "\n");
-            }
-        }
-        catch (error) {
-            if (spinner) {
-                spinner.fail();
-            }
-            handleError(error, "Batch processing");
-        }
-    })
-    // Provider Command Group (Corrected Structure)
-    .command("provider <subcommand>", "Manage AI provider configurations and status", (yargsProvider) => {
-        // Builder for the main 'provider' command
-        yargsProvider
-            .usage("Usage: $0 provider <subcommand> [options]") // Add usage here
-            .command("status", "Check status of all configured AI providers", (y) => y
-                .usage("Usage: $0 provider status [options]")
-                .option("verbose", {
-                    type: "boolean",
-                    alias: "v",
-                    description: "Show detailed information",
-                }) // Default is handled by middleware if NEUROLINK_DEBUG is set
-                .example("$0 provider status", "Check all providers")
-                .example("$0 provider status --verbose", "Show detailed status information"), async (argv) => {
-                if (argv.verbose && !argv.quiet) {
-                    console.log(chalk.yellow("ℹ️ Verbose mode enabled. Displaying detailed status.\n")); // Added newline
-                }
-                const spinner = argv.quiet
-                    ? null
-                    : ora("🔍 Checking AI provider status...\n").start();
-                const providers = [
-                    "openai",
-                    "bedrock",
-                    "vertex",
-                    "google-vertex",
-                    "anthropic",
-                    "azure",
-                    "google-ai",
-                    "huggingface",
-                    "ollama",
-                    "mistral",
-                ];
-                // Import hasProviderEnvVars to check environment variables
-                const { hasProviderEnvVars } = await import("../lib/utils/providerUtils.js");
-                const results = [];
-                for (const p of providers) {
-                    if (spinner) {
-                        spinner.text = `Testing ${p}...`;
-                    }
-                    // First check if provider has env vars configured
-                    const hasEnvVars = hasProviderEnvVars(p);
-                    if (!hasEnvVars && p !== "ollama") {
-                        // No env vars, don't even try to test
-                        results.push({
-                            provider: p,
-                            status: "not-configured",
-                            configured: false,
-                            error: "Missing required environment variables",
-                        });
-                        if (spinner) {
-                            spinner.fail(`${p}: ${chalk.gray("⚪ Not configured")} - Missing environment variables`);
-                        }
-                        else if (!argv.quiet) {
-                            console.log(`${p}: ${chalk.gray("⚪ Not configured")} - Missing environment variables`);
-                        }
-                        continue;
-                    }
-                    // Special handling for Ollama
-                    if (p === "ollama") {
-                        try {
-                            // First, check if the service is running
-                            const serviceResponse = await fetch("http://localhost:11434/api/tags", {
-                                method: "GET",
-                                signal: AbortSignal.timeout(2000),
-                            });
-                            if (!serviceResponse.ok) {
-                                throw new Error("Ollama service not responding");
-                            }
-                            // Service is running, now check if the default model is available
-                            const { models } = await serviceResponse.json();
-                            const defaultOllamaModel = "llama3.2:latest";
-                            const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
-                            if (modelIsAvailable) {
-                                results.push({
-                                    provider: p,
-                                    status: "working",
-                                    configured: true,
-                                    authenticated: true,
-                                    responseTime: 0,
-                                });
-                                if (spinner) {
-                                    spinner.succeed(`${p}: ${chalk.green("✅ Working")} - Service running and model '${defaultOllamaModel}' is available.`);
-                                }
-                            }
-                            else {
-                                results.push({
-                                    provider: p,
-                                    status: "failed",
-                                    configured: true,
-                                    authenticated: false,
-                                    error: `Ollama service is running, but model '${defaultOllamaModel}' is not found. Please run 'ollama pull ${defaultOllamaModel}'.`,
-                                });
-                                if (spinner) {
-                                    spinner.fail(`${p}: ${chalk.red("❌ Model Not Found")} - Run 'ollama pull ${defaultOllamaModel}'`);
-                                }
-                            }
-                        }
-                        catch (error) {
-                            results.push({
-                                provider: p,
-                                status: "failed",
-                                configured: false,
-                                authenticated: false,
-                                error: "Ollama is not running. Please start with: ollama serve",
-                            });
-                            if (spinner) {
-                                spinner.fail(`${p}: ${chalk.red("❌ Failed")} - Service not running`);
-                            }
-                        }
-                        continue;
-                    }
-                    // Provider has env vars, now test authentication
-                    try {
-                        const start = Date.now();
-                        // Add timeout to prevent hanging
-                        const testPromise = sdk.generate({
-                            input: { text: "test" },
-                            provider: p,
-                            maxTokens: 1,
-                            disableTools: true, // Disable tools for faster status check
-                        });
-                        const timeoutPromise = new Promise((_, reject) => {
-                            setTimeout(() => reject(new Error("Provider test timeout (5s)")), 5000);
-                        });
-                        await Promise.race([testPromise, timeoutPromise]);
-                        const duration = Date.now() - start;
-                        results.push({
-                            provider: p,
-                            status: "working",
-                            configured: true,
-                            authenticated: true,
-                            responseTime: duration,
-                        });
-                        if (spinner) {
-                            spinner.succeed(`${p}: ${chalk.green("✅ Working")} (${duration}ms)`);
-                        }
-                        else if (!argv.quiet) {
-                            console.log(`${p}: ${chalk.green("✅ Working")} (${duration}ms)`);
-                        }
-                    }
-                    catch (error) {
-                        const errorMsg = error.message.includes("timeout")
-                            ? "Connection timeout"
-                            : error.message.split("\n")[0];
-                        results.push({
-                            provider: p,
-                            status: "failed",
-                            configured: true,
-                            authenticated: false,
-                            error: errorMsg,
-                        });
-                        if (spinner) {
-                            spinner.fail(`${p}: ${chalk.red("❌ Failed")} - ${errorMsg}`);
-                        }
-                        else if (!argv.quiet) {
-                            console.error(`${p}: ${chalk.red("❌ Failed")} - ${errorMsg}`);
-                        }
-                    }
-                }
-                const working = results.filter((r) => r.status === "working").length;
-                const configured = results.filter((r) => r.configured).length;
-                if (spinner) {
-                    spinner.info(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working, ${configured}/${results.length} configured`));
-                }
-                else if (!argv.quiet) {
-                    console.log(chalk.blue(`\n📊 Summary: ${working}/${results.length} providers working, ${configured}/${results.length} configured`));
-                }
-                if (argv.verbose && !argv.quiet) {
-                    console.log(chalk.blue("\n📋 Detailed Results:"));
-                    console.log(JSON.stringify(results, null, 2));
-                }
-            })
-            .demandCommand(1, "")
-            .example("$0 provider status", "Check all providers");
-    })
-    // Status command alias
-    .command("status", "Check AI provider connectivity and performance (alias for provider status)", (yargsConfig) => yargsConfig
-        .usage("Usage: $0 status [options]")
-        .option("verbose", {
-            type: "boolean",
-            alias: "v",
-            description: "Show detailed information",
-        })
-        .option("quiet", {
-            type: "boolean",
-            alias: "q",
-            description: "Suppress non-essential output",
-        })
-        .example("$0 status", "Quick provider status check")
-        .example("$0 status --verbose", "Show detailed status information"), async (argv) => {
-        // Direct implementation instead of redirect to avoid recursion
-        const { CLICommandFactory } = await import("./factories/command-factory.js");
-        const commandFactory = new CLICommandFactory();
-        await commandFactory.executeProviderStatus(argv);
-    })
-    // Configuration Command Group
-    .command("config <subcommand>", "Manage NeuroLink configuration", (yargsConfig) => {
-        yargsConfig
-            .usage("Usage: $0 config <subcommand> [options]")
-            .command("export", "Export current configuration", (y) => y
-                .usage("Usage: $0 config export [options]")
-                .option("output", {
-                    type: "string",
-                    alias: "o",
-                    description: "Output file for configuration",
-                })
-                .example("$0 config export", "Export to stdout")
-                .example("$0 config export -o config.json", "Export to file"), async (argv) => {
-                try {
-                    const config = {
-                        providers: {
-                            openai: !!process.env.OPENAI_API_KEY,
-                            bedrock: !!(process.env.AWS_ACCESS_KEY_ID &&
-                                process.env.AWS_SECRET_ACCESS_KEY),
-                            vertex: !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
-                                process.env.GOOGLE_SERVICE_ACCOUNT_KEY ||
-                                (process.env.GOOGLE_AUTH_CLIENT_EMAIL &&
-                                    process.env.GOOGLE_AUTH_PRIVATE_KEY)),
-                            anthropic: !!process.env.ANTHROPIC_API_KEY,
-                            azure: !!(process.env.AZURE_OPENAI_API_KEY &&
-                                process.env.AZURE_OPENAI_ENDPOINT),
-                            "google-ai": !!process.env.GOOGLE_AI_API_KEY,
-                        },
-                        defaults: {
-                            temperature: 0.7,
-                            maxTokens: 500,
-                        },
-                        timestamp: new Date().toISOString(),
-                    };
-                    const output = JSON.stringify(config, null, 2);
-                    if (argv.output) {
-                        fs.writeFileSync(argv.output, output);
-                        if (!argv.quiet) {
-                            console.log(chalk.green(`✅ Configuration exported to ${argv.output}`));
-                        }
-                    }
-                    else {
-                        process.stdout.write(output + "\n");
-                    }
-                }
-                catch (error) {
-                    handleError(error, "Configuration export");
-                }
-            })
-            .demandCommand(1, "")
-            .example("$0 config export", "Export configuration");
-    })
-    // Get Best Provider Command
-    .command("get-best-provider", "Show the best available AI provider", (yargsInstance) => yargsInstance
-        .usage("Usage: $0 get-best-provider [options]")
-        .option("format", {
-            choices: ["text", "json"],
-            default: "text",
-            description: "Output format",
-        })
-        .example("$0 get-best-provider", "Show best provider")
-        .example("$0 get-best-provider --format json", "Show in JSON format"), async (argv) => {
-        try {
-            const { getBestProvider } = await import("../lib/utils/providerUtils.js");
-            const bestProvider = await getBestProvider();
-            if (argv.format === "json") {
-                process.stdout.write(JSON.stringify({ provider: bestProvider }, null, 2) + "\n");
-            }
-            else {
-                if (!argv.quiet) {
-                    console.log(chalk.green(`🎯 Best available provider: ${bestProvider}`));
-                }
-                else {
-                    process.stdout.write(bestProvider + "\n");
-                }
-            }
-        }
-        catch (error) {
-            handleError(error, "Provider selection");
-        }
-    })
-    // Completion Command
-    .command("completion", "Generate shell completion script", (yargsInstance) => yargsInstance
-        .usage("Usage: $0 completion")
-        .example("$0 completion >> ~/.bashrc", "Add to bash")
-        .example("$0 completion >> ~/.zshrc", "Add to zsh"), async (argv) => {
-        cli.showCompletionScript();
-    });
-// Add NEW Generate Command (Primary)
-// Removed CLICommandFactory call - commands are handled directly above.createGenerateCommand());
-// MCP Commands: Integrated within base provider functionality
+    // Generate Command (Primary) - Using CLICommandFactory
+    .command(CLICommandFactory.createGenerateCommand())
+    // Stream Text Command - Using CLICommandFactory
+    .command(CLICommandFactory.createStreamCommand())
+    // Batch Processing Command - Using CLICommandFactory
+    .command(CLICommandFactory.createBatchCommand())
+    // Provider Command Group - Using CLICommandFactory
+    .command(CLICommandFactory.createProviderCommands())
+    // Status command alias - Using CLICommandFactory
+    .command(CLICommandFactory.createStatusCommand())
+    // Configuration Command Group - Using CLICommandFactory
+    .command(CLICommandFactory.createConfigCommands())
+    // Get Best Provider Command - Using CLICommandFactory
+    .command(CLICommandFactory.createBestProviderCommand())
+    // Completion Command - Using CLICommandFactory
+    .command(CLICommandFactory.createCompletionCommand());
 // Add Ollama Commands
 addOllamaCommands(cli);
 // Execute CLI
 (async () => {
     try {
-        // Initialize CLI with manual MCP support
-        await initializeCLI();
         // Parse and execute commands
         await cli.parse();
     }