@juspay/neurolink 7.0.0 → 7.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -4
- package/README.md +16 -11
- package/dist/cli/commands/config.d.ts +2 -2
- package/dist/cli/commands/config.js +22 -21
- package/dist/cli/commands/mcp.d.ts +79 -0
- package/dist/cli/commands/mcp.js +916 -0
- package/dist/cli/commands/models.d.ts +63 -0
- package/dist/cli/commands/models.js +653 -0
- package/dist/cli/commands/ollama.js +56 -55
- package/dist/cli/factories/commandFactory.d.ts +67 -2
- package/dist/cli/factories/commandFactory.js +840 -92
- package/dist/cli/index.d.ts +6 -0
- package/dist/cli/index.js +42 -999
- package/dist/cli/utils/completeSetup.js +9 -8
- package/dist/cli/utils/envManager.js +7 -6
- package/dist/cli/utils/interactiveSetup.js +20 -19
- package/dist/core/analytics.js +25 -38
- package/dist/core/baseProvider.d.ts +8 -0
- package/dist/core/baseProvider.js +177 -68
- package/dist/core/constants.d.ts +11 -0
- package/dist/core/constants.js +17 -0
- package/dist/core/evaluation.js +25 -14
- package/dist/core/factory.js +21 -18
- package/dist/core/streamAnalytics.d.ts +65 -0
- package/dist/core/streamAnalytics.js +125 -0
- package/dist/factories/providerRegistry.js +3 -1
- package/dist/lib/core/analytics.js +25 -38
- package/dist/lib/core/baseProvider.d.ts +8 -0
- package/dist/lib/core/baseProvider.js +177 -68
- package/dist/lib/core/constants.d.ts +11 -0
- package/dist/lib/core/constants.js +17 -0
- package/dist/lib/core/evaluation.js +25 -14
- package/dist/lib/core/factory.js +22 -18
- package/dist/lib/core/streamAnalytics.d.ts +65 -0
- package/dist/lib/core/streamAnalytics.js +125 -0
- package/dist/lib/factories/providerRegistry.js +3 -1
- package/dist/lib/mcp/toolRegistry.d.ts +5 -0
- package/dist/lib/mcp/toolRegistry.js +60 -0
- package/dist/lib/models/modelRegistry.d.ts +132 -0
- package/dist/lib/models/modelRegistry.js +483 -0
- package/dist/lib/models/modelResolver.d.ts +115 -0
- package/dist/lib/models/modelResolver.js +467 -0
- package/dist/lib/neurolink.d.ts +4 -1
- package/dist/lib/neurolink.js +108 -69
- package/dist/lib/providers/anthropic.js +3 -0
- package/dist/lib/providers/googleAiStudio.js +13 -0
- package/dist/lib/providers/huggingFace.js +15 -3
- package/dist/lib/providers/mistral.js +19 -7
- package/dist/lib/providers/ollama.js +31 -7
- package/dist/lib/providers/openAI.js +12 -0
- package/dist/lib/sdk/toolRegistration.js +17 -0
- package/dist/lib/types/cli.d.ts +56 -1
- package/dist/lib/types/contextTypes.d.ts +110 -0
- package/dist/lib/types/contextTypes.js +176 -0
- package/dist/lib/types/index.d.ts +4 -1
- package/dist/lib/types/mcpTypes.d.ts +118 -7
- package/dist/lib/types/providers.d.ts +81 -0
- package/dist/lib/types/streamTypes.d.ts +44 -7
- package/dist/lib/types/tools.d.ts +9 -0
- package/dist/lib/types/universalProviderOptions.d.ts +3 -1
- package/dist/lib/types/universalProviderOptions.js +2 -1
- package/dist/lib/utils/logger.d.ts +7 -0
- package/dist/lib/utils/logger.js +16 -6
- package/dist/lib/utils/performance.d.ts +105 -0
- package/dist/lib/utils/performance.js +210 -0
- package/dist/lib/utils/providerUtils.js +9 -2
- package/dist/lib/utils/retryHandler.d.ts +89 -0
- package/dist/lib/utils/retryHandler.js +269 -0
- package/dist/mcp/toolRegistry.d.ts +5 -0
- package/dist/mcp/toolRegistry.js +60 -0
- package/dist/models/modelRegistry.d.ts +132 -0
- package/dist/models/modelRegistry.js +483 -0
- package/dist/models/modelResolver.d.ts +115 -0
- package/dist/models/modelResolver.js +468 -0
- package/dist/neurolink.d.ts +4 -1
- package/dist/neurolink.js +108 -69
- package/dist/providers/anthropic.js +3 -0
- package/dist/providers/googleAiStudio.js +13 -0
- package/dist/providers/huggingFace.js +15 -3
- package/dist/providers/mistral.js +19 -7
- package/dist/providers/ollama.js +31 -7
- package/dist/providers/openAI.js +12 -0
- package/dist/sdk/toolRegistration.js +17 -0
- package/dist/types/cli.d.ts +56 -1
- package/dist/types/contextTypes.d.ts +110 -0
- package/dist/types/contextTypes.js +177 -0
- package/dist/types/index.d.ts +4 -1
- package/dist/types/mcpTypes.d.ts +118 -7
- package/dist/types/providers.d.ts +81 -0
- package/dist/types/streamTypes.d.ts +44 -7
- package/dist/types/tools.d.ts +9 -0
- package/dist/types/universalProviderOptions.d.ts +3 -1
- package/dist/types/universalProviderOptions.js +3 -1
- package/dist/utils/logger.d.ts +7 -0
- package/dist/utils/logger.js +16 -6
- package/dist/utils/performance.d.ts +105 -0
- package/dist/utils/performance.js +210 -0
- package/dist/utils/providerUtils.js +9 -2
- package/dist/utils/retryHandler.d.ts +89 -0
- package/dist/utils/retryHandler.js +269 -0
- package/package.json +2 -1
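The bulk of this release is a rebuilt CLI: command definitions move out of `package/dist/cli/index.js` (−999 lines) into the command factories, with new `models` and `mcp` command modules, a model registry and resolver, a retry handler, and stream analytics. The invocations below are assembled from the `.example()` strings in the `commandFactory.js` diff that follows; the `neurolink` binary name is taken from the CLI's own error messages rather than stated in the diff itself.

```bash
# New "gen" alias and stdin support on generate
neurolink generate "Explain quantum computing"
neurolink gen "Write a Python function" --provider openai
echo "Summarize this" | neurolink generate

# Provider diagnostics now live under dedicated commands
neurolink provider status --verbose
neurolink get-best-provider --format json
```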
package/dist/cli/factories/commandFactory.js

@@ -1,97 +1,492 @@
 import { NeuroLink } from "../../lib/neurolink.js";
+import { ContextFactory, } from "../../lib/types/contextTypes.js";
+import { ModelsCommandFactory } from "../commands/models.js";
+import { MCPCommandFactory } from "../commands/mcp.js";
 import ora from "ora";
 import chalk from "chalk";
 import { logger } from "../../lib/utils/logger.js";
+import fs from "fs";
 /**
  * CLI Command Factory for generate commands
  */
 export class CLICommandFactory {
+    // Common options available on all commands
+    static commonOptions = {
+        // Core generation options
+        provider: {
+            choices: [
+                "auto",
+                "openai",
+                "bedrock",
+                "vertex",
+                "googleVertex",
+                "anthropic",
+                "azure",
+                "google-ai",
+                "huggingface",
+                "ollama",
+                "mistral",
+            ],
+            default: "auto",
+            description: "AI provider to use (auto-selects best available)",
+            alias: "p",
+        },
+        model: {
+            type: "string",
+            description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
+            alias: "m",
+        },
+        temperature: {
+            type: "number",
+            default: 0.7,
+            description: "Creativity level (0.0 = focused, 1.0 = creative)",
+            alias: "t",
+        },
+        maxTokens: {
+            type: "number",
+            default: 1000,
+            description: "Maximum tokens to generate",
+            alias: "max",
+        },
+        system: {
+            type: "string",
+            description: "System prompt to guide AI behavior",
+            alias: "s",
+        },
+        // Output control options
+        format: {
+            choices: ["text", "json", "table"],
+            default: "text",
+            alias: ["f", "output-format"],
+            description: "Output format",
+        },
+        output: {
+            type: "string",
+            description: "Save output to file",
+            alias: "o",
+        },
+        // Behavior control options
+        timeout: {
+            type: "number",
+            default: 120,
+            description: "Maximum execution time in seconds",
+        },
+        delay: {
+            type: "number",
+            description: "Delay between operations (ms)",
+        },
+        // Tools & features options
+        disableTools: {
+            type: "boolean",
+            default: false,
+            description: "Disable MCP tool integration (tools enabled by default)",
+        },
+        enableAnalytics: {
+            type: "boolean",
+            default: false,
+            description: "Enable usage analytics collection",
+        },
+        enableEvaluation: {
+            type: "boolean",
+            default: false,
+            description: "Enable AI response quality evaluation",
+        },
+        evaluationDomain: {
+            type: "string",
+            description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
+        },
+        toolUsageContext: {
+            type: "string",
+            description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
+        },
+        lighthouseStyle: {
+            type: "boolean",
+            default: false,
+            description: "Use Lighthouse-compatible domain-aware evaluation",
+        },
+        context: {
+            type: "string",
+            description: "JSON context object for custom data",
+        },
+        // Debug & output options
+        debug: {
+            type: "boolean",
+            alias: ["v", "verbose"],
+            default: false,
+            description: "Enable debug mode with verbose output",
+        },
+        quiet: {
+            type: "boolean",
+            alias: "q",
+            default: false,
+            description: "Suppress non-essential output",
+        },
+        noColor: {
+            type: "boolean",
+            default: false,
+            description: "Disable colored output (useful for CI/scripts)",
+        },
+        configFile: {
+            type: "string",
+            description: "Path to custom configuration file",
+        },
+    };
+    // Helper method to build options for commands
+    static buildOptions(yargs, additionalOptions = {}) {
+        return yargs.options({
+            ...this.commonOptions,
+            ...additionalOptions,
+        });
+    }
+    // Helper method to process common options
+    static processOptions(argv) {
+        // Handle noColor option by disabling chalk
+        if (argv.noColor) {
+            process.env.FORCE_COLOR = "0";
+        }
+        // Process context using ContextFactory for type-safe integration
+        let processedContext;
+        let contextConfig;
+        if (argv.context) {
+            let rawContext;
+            if (typeof argv.context === "string") {
+                try {
+                    rawContext = JSON.parse(argv.context);
+                }
+                catch (err) {
+                    const contextStr = argv.context;
+                    const truncatedJson = contextStr.length > 100
+                        ? `${contextStr.slice(0, 100)}...`
+                        : contextStr;
+                    logger.error(`Invalid JSON in --context parameter: ${err.message}. Received: ${truncatedJson}`);
+                    process.exit(1);
+                }
+            }
+            else {
+                rawContext = argv.context;
+            }
+            const validatedContext = ContextFactory.validateContext(rawContext);
+            if (validatedContext) {
+                processedContext = validatedContext;
+                // Configure context integration based on CLI usage
+                contextConfig = {
+                    mode: "prompt_prefix", // Add context as prompt prefix for CLI usage
+                    includeInPrompt: true,
+                    includeInAnalytics: true,
+                    includeInEvaluation: true,
+                    maxLength: 500, // Reasonable limit for CLI context
+                };
+            }
+            else if (argv.debug) {
+                logger.debug("Invalid context provided, skipping context integration");
+            }
+        }
+        return {
+            provider: argv.provider === "auto" ? undefined : argv.provider,
+            model: argv.model,
+            temperature: argv.temperature,
+            maxTokens: argv.maxTokens,
+            systemPrompt: argv.system,
+            timeout: argv.timeout,
+            disableTools: argv.disableTools,
+            enableAnalytics: argv.enableAnalytics,
+            enableEvaluation: argv.enableEvaluation,
+            evaluationDomain: argv.evaluationDomain,
+            toolUsageContext: argv.toolUsageContext,
+            lighthouseStyle: argv.lighthouseStyle,
+            context: processedContext,
+            contextConfig,
+            debug: argv.debug,
+            quiet: argv.quiet,
+            format: argv.format,
+            output: argv.output,
+            delay: argv.delay,
+            noColor: argv.noColor,
+            configFile: argv.configFile,
+        };
+    }
+    // Helper method to handle output
+    static handleOutput(result, options) {
+        let output;
+        if (options.format === "json") {
+            output = JSON.stringify(result, null, 2);
+        }
+        else if (options.format === "table" && Array.isArray(result)) {
+            logger.table(result);
+            return;
+        }
+        else {
+            if (typeof result === "string") {
+                output = result;
+            }
+            else if (result && typeof result === "object" && "content" in result) {
+                const generateResult = result;
+                output = generateResult.content;
+                // Add analytics display for text mode when enabled
+                if (options.enableAnalytics && generateResult.analytics) {
+                    output += this.formatAnalyticsForTextMode(generateResult);
+                }
+            }
+            else if (result && typeof result === "object" && "text" in result) {
+                output = result.text;
+            }
+            else {
+                output = JSON.stringify(result);
+            }
+        }
+        if (options.output) {
+            fs.writeFileSync(options.output, output);
+            if (!options.quiet) {
+                logger.always(`Output saved to ${options.output}`);
+            }
+        }
+        else {
+            logger.always(output);
+        }
+    }
+    // Helper method to validate token usage data
+    static isValidTokenUsage(tokens) {
+        return !!(tokens &&
+            typeof tokens === "object" &&
+            tokens !== null &&
+            typeof tokens.input === "number" &&
+            typeof tokens.output === "number" &&
+            typeof tokens.total === "number");
+    }
+    // Helper method to format analytics for text mode display
+    static formatAnalyticsForTextMode(result) {
+        if (!result.analytics) {
+            return "";
+        }
+        const analytics = result.analytics;
+        let analyticsText = "\n\n📊 Analytics:\n";
+        // Provider and model info
+        analyticsText += ` Provider: ${analytics.provider}`;
+        if (result.model) {
+            analyticsText += ` (${result.model})`;
+        }
+        analyticsText += "\n";
+        // Token usage
+        if (this.isValidTokenUsage(analytics.tokens)) {
+            const tokens = analytics.tokens;
+            analyticsText += ` Tokens: ${tokens.input} input + ${tokens.output} output = ${tokens.total} total\n`;
+        }
+        // Cost information
+        if (analytics.cost !== undefined &&
+            analytics.cost !== null &&
+            typeof analytics.cost === "number") {
+            analyticsText += ` Cost: $${analytics.cost.toFixed(5)}\n`;
+        }
+        // Response time
+        if (analytics.responseTime && typeof analytics.responseTime === "number") {
+            const timeInSeconds = (analytics.responseTime / 1000).toFixed(1);
+            analyticsText += ` Time: ${timeInSeconds}s\n`;
+        }
+        // Tools used
+        if (result.toolsUsed && result.toolsUsed.length > 0) {
+            analyticsText += ` Tools: ${result.toolsUsed.join(", ")}\n`;
+        }
+        // Context information
+        if (analytics.context &&
+            typeof analytics.context === "object" &&
+            analytics.context !== null) {
+            const contextEntries = Object.entries(analytics.context);
+            if (contextEntries.length > 0) {
+                const contextItems = contextEntries.map(([key, value]) => `${key}=${value}`);
+                analyticsText += ` Context: ${contextItems.join(", ")}\n`;
+            }
+        }
+        return analyticsText;
+    }
     /**
      * Create the new primary 'generate' command
      */
     static createGenerateCommand() {
         return {
-            command: "generate <input>",
-            describe: "Generate content using AI
+            command: ["generate <input>", "gen <input>"],
+            describe: "Generate content using AI providers",
             builder: (yargs) => {
-                return yargs
+                return this.buildOptions(yargs
                     .positional("input", {
-                    describe: "Text input for generation",
                     type: "string",
+                    description: "Text prompt for AI generation (or read from stdin)",
                 })
-                    .
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                })
-                    .option("temperature", {
-                    describe: "Temperature (0-1)",
-                    type: "number",
-                })
-                    .option("max-tokens", {
-                    describe: "Maximum tokens",
-                    type: "number",
-                })
-                    .option("system-prompt", {
-                    describe: "System prompt",
+                    .example('$0 generate "Explain quantum computing"', "Basic generation")
+                    .example('$0 gen "Write a Python function" --provider openai', "Use specific provider")
+                    .example('$0 generate "Code review" -m gpt-4 -t 0.3', "Use specific model and temperature")
+                    .example('echo "Summarize this" | $0 generate', "Use stdin input")
+                    .example('$0 generate "Analyze data" --enable-analytics', "Enable usage analytics"));
+            },
+            handler: async (argv) => await this.executeGenerate(argv),
+        };
+    }
+    /**
+     * Create stream command
+     */
+    static createStreamCommand() {
+        return {
+            command: "stream <input>",
+            describe: "Stream generation in real-time",
+            builder: (yargs) => {
+                return this.buildOptions(yargs
+                    .positional("input", {
                     type: "string",
+                    description: "Text prompt for streaming (or read from stdin)",
                 })
-                    .
-
+                    .example('$0 stream "Write a story about space"', "Stream a creative story")
+                    .example('$0 stream "Explain machine learning" -p anthropic', "Stream with specific provider")
+                    .example('$0 stream "Code walkthrough" --output story.txt', "Stream to file")
+                    .example('echo "Live demo" | $0 stream', "Stream from stdin"));
+            },
+            handler: async (argv) => await this.executeStream(argv),
+        };
+    }
+    /**
+     * Create batch command
+     */
+    static createBatchCommand() {
+        return {
+            command: "batch <file>",
+            describe: "Process multiple prompts from a file",
+            builder: (yargs) => {
+                return this.buildOptions(yargs
+                    .positional("file", {
                     type: "string",
+                    description: "File with prompts (one per line)",
+                    demandOption: true,
                 })
-                    .
-
-
-
+                    .example("$0 batch prompts.txt", "Process prompts from file")
+                    .example("$0 batch questions.txt --format json", "Export results as JSON")
+                    .example("$0 batch tasks.txt -p vertex --delay 2000", "Use Vertex AI with 2s delay")
+                    .example("$0 batch batch.txt --output results.json", "Save results to file"));
+            },
+            handler: async (argv) => await this.executeBatch(argv),
+        };
+    }
+    /**
+     * Create provider commands
+     */
+    static createProviderCommands() {
+        return {
+            command: "provider <subcommand>",
+            describe: "Manage AI provider configurations and status",
+            builder: (yargs) => {
+                return yargs
+                    .command("status", "Check status of all configured AI providers", (y) => this.buildOptions(y)
+                    .example("$0 provider status", "Check all provider status")
+                    .example("$0 provider status --verbose", "Detailed provider diagnostics")
+                    .example("$0 provider status --quiet", "Minimal status output"), (argv) => CLICommandFactory.executeProviderStatus(argv))
+                    .demandCommand(1, "Please specify a provider subcommand");
+            },
+            handler: () => { }, // No-op handler as subcommands handle everything
+        };
+    }
+    /**
+     * Create status command (alias for provider status)
+     */
+    static createStatusCommand() {
+        return {
+            command: "status",
+            describe: "Check AI provider connectivity and performance (alias for provider status)",
+            builder: (yargs) => this.buildOptions(yargs)
+                .example("$0 status", "Quick provider status check")
+                .example("$0 status --verbose", "Detailed connectivity diagnostics")
+                .example("$0 status --format json", "Export status as JSON"),
+            handler: async (argv) => await CLICommandFactory.executeProviderStatus(argv),
+        };
+    }
+    /**
+     * Create models commands
+     */
+    static createModelsCommands() {
+        return ModelsCommandFactory.createModelsCommands();
+    }
+    /**
+     * Create MCP commands
+     */
+    static createMCPCommands() {
+        return MCPCommandFactory.createMCPCommands();
+    }
+    /**
+     * Create discover command
+     */
+    static createDiscoverCommand() {
+        return MCPCommandFactory.createDiscoverCommand();
+    }
+    /**
+     * Create config commands
+     */
+    static createConfigCommands() {
+        return {
+            command: "config <subcommand>",
+            describe: "Manage NeuroLink configuration",
+            builder: (yargs) => {
+                return yargs
+                    .command("init", "Interactive configuration setup wizard", (y) => this.buildOptions(y), async (argv) => {
+                    const { configManager } = await import("../commands/config.js");
+                    await configManager.initInteractive();
                 })
-                    .
-
-
-                    default: false,
+                    .command("show", "Display current configuration", (y) => this.buildOptions(y), async (argv) => {
+                    const { configManager } = await import("../commands/config.js");
+                    configManager.showConfig();
                 })
-                    .
-
-
-
+                    .command("validate", "Validate current configuration", (y) => this.buildOptions(y), async (argv) => {
+                    const { configManager } = await import("../commands/config.js");
+                    const result = configManager.validateConfig();
+                    if (result.valid) {
+                        logger.always(chalk.green("✅ Configuration is valid"));
+                    }
+                    else {
+                        logger.always(chalk.red("❌ Configuration has errors:"));
+                        result.errors.forEach((error) => logger.always(` • ${error}`));
+                        process.exit(1);
+                    }
                 })
-                    .
-
-
-                    choices: ["text", "structured", "json"],
-                    default: "text",
-                    alias: "format",
+                    .command("reset", "Reset configuration to defaults", (y) => this.buildOptions(y), async (argv) => {
+                    const { configManager } = await import("../commands/config.js");
+                    configManager.resetConfig();
                 })
-                    .
-
-                    type: "boolean",
-                    default: false,
-                });
+                    .command("export", "Export current configuration", (y) => this.buildOptions(y), (argv) => this.executeConfigExport(argv))
+                    .demandCommand(1, "");
             },
-            handler:
+            handler: () => { }, // No-op handler as subcommands handle everything
+        };
+    }
+    /**
+     * Create get-best-provider command
+     */
+    static createBestProviderCommand() {
+        return {
+            command: "get-best-provider",
+            describe: "Show the best available AI provider",
+            builder: (yargs) => this.buildOptions(yargs)
+                .example("$0 get-best-provider", "Get best available provider")
+                .example("$0 get-best-provider --format json", "Get provider as JSON")
+                .example("$0 get-best-provider --quiet", "Just the provider name"),
+            handler: async (argv) => await this.executeGetBestProvider(argv),
+        };
+    }
+    /**
+     * Create completion command
+     */
+    static createCompletionCommand() {
+        return {
+            command: "completion",
+            describe: "Generate shell completion script",
+            builder: (yargs) => this.buildOptions(yargs)
+                .example("$0 completion", "Generate shell completion")
+                .example("$0 completion > ~/.neurolink-completion.sh", "Save completion script")
+                .example("source ~/.neurolink-completion.sh", "Enable completions (bash)")
+                .epilogue("Add the completion script to your shell profile for persistent completions"),
+            handler: async (argv) => await this.executeCompletion(argv),
         };
     }
     /**
      * Execute provider status command
      */
-    async executeProviderStatus(argv) {
+    static async executeProviderStatus(argv) {
         if (argv.verbose && !argv.quiet) {
-
+            logger.always(chalk.yellow("ℹ️ Verbose mode enabled. Displaying detailed status.\n"));
         }
         const spinner = argv.quiet
             ? null
@@ -99,7 +494,7 @@ export class CLICommandFactory {
         try {
             // Use SDK's provider diagnostic method instead of manual testing
            const sdk = new NeuroLink();
-            const results = await sdk.getProviderStatus();
+            const results = await sdk.getProviderStatus({ quiet: !!argv.quiet });
            if (spinner) {
                const working = results.filter((r) => r.status === "working").length;
                const configured = results.filter((r) => r.configured).length;
@@ -114,21 +509,21 @@ export class CLICommandFactory {
                    : chalk.gray("⚪ Not configured");
                const time = result.responseTime ? ` (${result.responseTime}ms)` : "";
                const model = result.model ? ` [${result.model}]` : "";
-
+                logger.always(`${result.provider}: ${status}${time}${model}`);
                if (argv.verbose && result.error) {
-
+                    logger.always(` Error: ${chalk.red(result.error)}`);
                }
            }
            if (argv.verbose && !argv.quiet) {
-
-
+                logger.always(chalk.blue("\n📋 Detailed Results:"));
+                logger.always(JSON.stringify(results, null, 2));
            }
        }
        catch (error) {
            if (spinner) {
                spinner.fail("Provider status check failed");
            }
-
+            logger.error(chalk.red("Error checking provider status:"), error);
            process.exit(1);
        }
    }
@@ -136,32 +531,73 @@ export class CLICommandFactory {
     * Execute the generate command
     */
    static async executeGenerate(argv) {
-
+        // Handle stdin input if no input provided
+        if (!argv.input && !process.stdin.isTTY) {
+            let stdinData = "";
+            process.stdin.setEncoding("utf8");
+            for await (const chunk of process.stdin) {
+                stdinData += chunk;
+            }
+            argv.input = stdinData.trim();
+            if (!argv.input) {
+                throw new Error("No input received from stdin");
+            }
+        }
+        else if (!argv.input) {
+            throw new Error('Input required. Use: neurolink generate "your prompt" or echo "prompt" | neurolink generate');
+        }
+        const options = this.processOptions(argv);
+        const spinner = argv.quiet ? null : ora("🤖 Generating text...").start();
        try {
+            // Add delay if specified
+            if (options.delay) {
+                await new Promise((resolve) => setTimeout(resolve, options.delay));
+            }
+            // Process context if provided
+            let inputText = argv.input;
+            let contextMetadata;
+            if (options.context && options.contextConfig) {
+                const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
+                // Integrate context into prompt if configured
+                if (processedContextResult.processedContext) {
+                    inputText = processedContextResult.processedContext + inputText;
+                }
+                // Add context metadata for analytics
+                contextMetadata = {
+                    ...ContextFactory.extractAnalyticsContext(options.context),
+                    contextMode: processedContextResult.config.mode,
+                    contextTruncated: processedContextResult.metadata.truncated,
+                };
+                if (options.debug) {
+                    logger.debug("Context processed:", {
+                        mode: processedContextResult.config.mode,
+                        truncated: processedContextResult.metadata.truncated,
+                        processingTime: processedContextResult.metadata.processingTime,
+                    });
+                }
+            }
            const sdk = new NeuroLink();
-            const outputFormat = argv.outputFormat || argv.format || "text";
            const result = await sdk.generate({
-                input: { text:
-
-
-
-
-
-
-
-                enableAnalytics:
-                enableEvaluation:
+                input: { text: inputText },
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                timeout: options.timeout,
+                disableTools: options.disableTools,
+                enableAnalytics: options.enableAnalytics,
+                enableEvaluation: options.enableEvaluation,
+                evaluationDomain: options.evaluationDomain,
+                toolUsageContext: options.toolUsageContext,
+                context: contextMetadata,
            });
-            spinner
-
-            if (outputFormat === "json") {
-                console.log(JSON.stringify(result, null, 2));
-            }
-            else {
-                console.log("\n" + chalk.cyan("Generated Content:"));
-                console.log(result.content);
+            if (spinner) {
+                spinner.succeed(chalk.green("✅ Text generated successfully!"));
            }
-
+            // Handle output with universal formatting
+            this.handleOutput(result, options);
+            if (options.debug) {
                logger.debug("\n" + chalk.yellow("Debug Information:"));
                logger.debug("Provider:", result.provider);
                logger.debug("Model:", result.model);
@@ -172,13 +608,325 @@ export class CLICommandFactory {
                logger.debug("Evaluation:", JSON.stringify(result.evaluation, null, 2));
            }
        }
-            // Exit successfully
            process.exit(0);
        }
        catch (error) {
-            spinner
-
+            if (spinner) {
+                spinner.fail();
+            }
+            logger.error(chalk.red(`❌ Generation failed: ${error.message}`));
+            if (options.debug) {
+                logger.error(chalk.gray(error.stack));
+            }
+            process.exit(1);
+        }
+    }
+    /**
+     * Execute the stream command
+     */
+    static async executeStream(argv) {
+        // Handle stdin input if no input provided
+        if (!argv.input && !process.stdin.isTTY) {
+            let stdinData = "";
+            process.stdin.setEncoding("utf8");
+            for await (const chunk of process.stdin) {
+                stdinData += chunk;
+            }
+            argv.input = stdinData.trim();
+            if (!argv.input) {
+                throw new Error("No input received from stdin");
+            }
+        }
+        else if (!argv.input) {
+            throw new Error('Input required. Use: neurolink stream "your prompt" or echo "prompt" | neurolink stream');
+        }
+        const options = this.processOptions(argv);
+        if (!options.quiet) {
+            logger.always(chalk.blue("🔄 Streaming..."));
+        }
+        try {
+            // Add delay if specified
+            if (options.delay) {
+                await new Promise((resolve) => setTimeout(resolve, options.delay));
+            }
+            // Process context if provided (same as generate command)
+            let inputText = argv.input;
+            let contextMetadata;
+            if (options.context && options.contextConfig) {
+                const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
+                // Integrate context into prompt if configured
+                if (processedContextResult.processedContext) {
+                    inputText = processedContextResult.processedContext + inputText;
+                }
+                // Add context metadata for analytics
+                contextMetadata = {
+                    ...ContextFactory.extractAnalyticsContext(options.context),
+                    contextMode: processedContextResult.config.mode,
+                    contextTruncated: processedContextResult.metadata.truncated,
+                };
+                if (options.debug) {
+                    logger.debug("Context processed for streaming:", {
+                        mode: processedContextResult.config.mode,
+                        truncated: processedContextResult.metadata.truncated,
+                        processingTime: processedContextResult.metadata.processingTime,
+                    });
+                }
+            }
+            const sdk = new NeuroLink();
+            const stream = await sdk.stream({
+                input: { text: inputText },
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                timeout: options.timeout,
+                disableTools: options.disableTools,
+                enableAnalytics: options.enableAnalytics,
+                enableEvaluation: options.enableEvaluation,
+                context: contextMetadata,
+            });
+            let fullContent = "";
+            // Process the stream
+            for await (const chunk of stream.stream) {
+                if (options.delay && options.delay > 0) {
+                    // Demo mode - add delay between chunks
+                    await new Promise((resolve) => setTimeout(resolve, options.delay));
+                }
+                process.stdout.write(chunk.content);
+                fullContent += chunk.content;
+            }
+            if (!options.quiet) {
+                process.stdout.write("\n");
+            }
+            // 🔧 NEW: Display analytics and evaluation after streaming (similar to generate command)
+            if (options.enableAnalytics && stream.analytics) {
+                const resolvedAnalytics = await (stream.analytics instanceof Promise
+                    ? stream.analytics
+                    : Promise.resolve(stream.analytics));
+                const streamAnalytics = {
+                    success: true,
+                    content: fullContent,
+                    analytics: resolvedAnalytics,
+                    model: stream.model,
+                    toolsUsed: stream.toolCalls?.map((tc) => tc.toolName) || [],
+                };
+                const analyticsDisplay = this.formatAnalyticsForTextMode(streamAnalytics);
+                logger.always(analyticsDisplay);
+            }
+            // 🔧 NEW: Display evaluation after streaming
+            if (options.enableEvaluation && stream.evaluation) {
+                const resolvedEvaluation = await (stream.evaluation instanceof Promise
+                    ? stream.evaluation
+                    : Promise.resolve(stream.evaluation));
+                logger.always(chalk.blue("\n📊 Response Evaluation:"));
+                logger.always(` Relevance: ${resolvedEvaluation.relevance}/10`);
+                logger.always(` Accuracy: ${resolvedEvaluation.accuracy}/10`);
+                logger.always(` Completeness: ${resolvedEvaluation.completeness}/10`);
+                logger.always(` Overall: ${resolvedEvaluation.overall}/10`);
+                if (resolvedEvaluation.reasoning) {
+                    logger.always(` Reasoning: ${resolvedEvaluation.reasoning}`);
+                }
+            }
+            // Handle output file if specified
+            if (options.output) {
+                fs.writeFileSync(options.output, fullContent);
+                if (!options.quiet) {
+                    logger.always(`\nOutput saved to ${options.output}`);
+                }
+            }
+            // 🔧 NEW: Debug output for streaming (similar to generate command)
+            if (options.debug) {
+                logger.debug("\n" + chalk.yellow("Debug Information (Streaming):"));
+                logger.debug("Provider:", stream.provider);
+                logger.debug("Model:", stream.model);
+                if (stream.analytics) {
+                    const resolvedAnalytics = await (stream.analytics instanceof Promise
+                        ? stream.analytics
+                        : Promise.resolve(stream.analytics));
+                    logger.debug("Analytics:", JSON.stringify(resolvedAnalytics, null, 2));
+                }
+                if (stream.evaluation) {
+                    const resolvedEvaluation = await (stream.evaluation instanceof Promise
+                        ? stream.evaluation
+                        : Promise.resolve(stream.evaluation));
+                    logger.debug("Evaluation:", JSON.stringify(resolvedEvaluation, null, 2));
+                }
+                if (stream.metadata) {
+                    logger.debug("Metadata:", JSON.stringify(stream.metadata, null, 2));
+                }
+            }
+            process.exit(0);
+        }
+        catch (error) {
+            logger.error(chalk.red(`❌ Streaming failed: ${error.message}`));
+            if (options.debug) {
+                logger.error(chalk.gray(error.stack));
+            }
+            process.exit(1);
+        }
+    }
+    /**
+     * Execute the batch command
+     */
+    static async executeBatch(argv) {
+        const options = this.processOptions(argv);
+        const spinner = options.quiet ? null : ora().start();
+        try {
+            if (!argv.file) {
+                throw new Error("No file specified");
+            }
+            if (!fs.existsSync(argv.file)) {
+                throw new Error(`File not found: ${argv.file}`);
+            }
+            const buffer = fs.readFileSync(argv.file);
+            const prompts = buffer
+                .toString("utf8")
+                .split("\n")
+                .map((line) => line.trim())
+                .filter(Boolean);
+            if (prompts.length === 0) {
+                throw new Error("No prompts found in file");
+            }
+            if (spinner) {
+                spinner.text = `📦 Processing ${prompts.length} prompts...`;
+            }
+            else if (!options.quiet) {
+                logger.always(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
+            }
+            const results = [];
+            const sdk = new NeuroLink();
+            for (let i = 0; i < prompts.length; i++) {
+                if (spinner) {
+                    spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
+                }
+                try {
+                    // Process context for each batch item
+                    let inputText = prompts[i];
+                    let contextMetadata;
+                    if (options.context && options.contextConfig) {
+                        const processedContextResult = ContextFactory.processContext(options.context, options.contextConfig);
+                        if (processedContextResult.processedContext) {
+                            inputText = processedContextResult.processedContext + inputText;
+                        }
+                        contextMetadata = {
+                            ...ContextFactory.extractAnalyticsContext(options.context),
+                            contextMode: processedContextResult.config.mode,
+                            contextTruncated: processedContextResult.metadata.truncated,
+                            batchIndex: i,
+                        };
+                    }
+                    const result = await sdk.generate({
+                        input: { text: inputText },
+                        provider: options.provider,
+                        model: options.model,
+                        temperature: options.temperature,
+                        maxTokens: options.maxTokens,
+                        systemPrompt: options.systemPrompt,
+                        timeout: options.timeout,
+                        disableTools: options.disableTools,
+                        enableAnalytics: options.enableAnalytics,
+                        enableEvaluation: options.enableEvaluation,
+                        context: contextMetadata,
+                    });
+                    results.push({ prompt: prompts[i], response: result.content });
+                    if (spinner) {
+                        spinner.render();
+                    }
+                }
+                catch (error) {
+                    results.push({
+                        prompt: prompts[i],
+                        error: error.message,
+                    });
+                    if (spinner) {
+                        spinner.render();
+                    }
+                }
+                // Add delay between requests
+                if (i < prompts.length - 1) {
+                    await new Promise((resolve) => setTimeout(resolve, options.delay || 1000));
+                }
+            }
+            if (spinner) {
+                spinner.succeed(chalk.green("✅ Batch processing complete!"));
+            }
+            // Handle output with universal formatting
+            this.handleOutput(results, options);
+            process.exit(0);
+        }
+        catch (error) {
+            if (spinner) {
+                spinner.fail();
+            }
+            logger.error(chalk.red(`❌ Batch processing failed: ${error.message}`));
+            if (options.debug) {
+                logger.error(chalk.gray(error.stack));
+            }
            process.exit(1);
        }
    }
+    /**
+     * Execute config export command
+     */
+    static async executeConfigExport(argv) {
+        const options = this.processOptions(argv);
+        try {
+            const config = {
+                providers: {
+                    openai: !!process.env.OPENAI_API_KEY,
+                    bedrock: !!(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY),
+                    vertex: !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
+                        process.env.GOOGLE_SERVICE_ACCOUNT_KEY),
+                    anthropic: !!process.env.ANTHROPIC_API_KEY,
+                    azure: !!(process.env.AZURE_OPENAI_API_KEY &&
+                        process.env.AZURE_OPENAI_ENDPOINT),
+                    "google-ai": !!process.env.GOOGLE_AI_API_KEY,
+                },
+                defaults: {
+                    temperature: 0.7,
+                    maxTokens: 500,
+                },
+                timestamp: new Date().toISOString(),
+            };
+            this.handleOutput(config, options);
+        }
+        catch (error) {
+            logger.error(chalk.red(`❌ Configuration export failed: ${error.message}`));
+            process.exit(1);
+        }
+    }
+    /**
+     * Execute get best provider command
+     */
+    static async executeGetBestProvider(argv) {
+        const options = this.processOptions(argv);
+        try {
+            const { getBestProvider } = await import("../../lib/utils/providerUtils.js");
+            const bestProvider = await getBestProvider();
+            if (options.format === "json") {
+                this.handleOutput({ provider: bestProvider }, options);
+            }
+            else {
+                if (!options.quiet) {
+                    logger.always(chalk.green(`🎯 Best available provider: ${bestProvider}`));
+                }
+                else {
+                    this.handleOutput(bestProvider, options);
+                }
+            }
+        }
+        catch (error) {
+            logger.error(chalk.red(`❌ Provider selection failed: ${error.message}`));
+            process.exit(1);
+        }
+    }
+    /**
+     * Execute completion command
+     */
+    static async executeCompletion(argv) {
+        // This would need to be implemented with the actual CLI instance
+        logger.always("# Completion script would be generated here");
+        logger.always("# This requires access to the yargs CLI instance");
+    }
 }
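Taken together, `executeGenerate`, `executeStream`, and `executeBatch` all route through the same `processOptions` surface, so analytics, evaluation, and JSON context behave identically across commands. Below is a short sketch of that shared surface, again built from flags defined in the diff above; the `--context` payload is an invented illustration of the JSON string that `processOptions` parses and, in `prompt_prefix` mode, prepends to the prompt.

```bash
# Analytics and evaluation are opt-in on every command
neurolink generate "Analyze data" --enable-analytics --enable-evaluation

# --context takes a JSON string; invalid JSON aborts with an error
# (the userId/department values here are hypothetical)
neurolink generate "Summarize the quarter" \
  --context '{"userId": "u-123", "department": "sales"}'

# Streaming and batch share the same option set
neurolink stream "Code walkthrough" --output story.txt
neurolink batch prompts.txt --format json --output results.json
```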