@juspay/neurolink 6.2.1 → 7.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +32 -16
- package/README.md +3 -3
- package/dist/cli/factories/commandFactory.d.ts +70 -0
- package/dist/cli/factories/commandFactory.js +633 -0
- package/dist/cli/index.d.ts +6 -0
- package/dist/cli/index.js +18 -990
- package/dist/cli/utils/{complete-setup.d.ts → completeSetup.d.ts} +1 -1
- package/dist/cli/utils/{complete-setup.js → completeSetup.js} +2 -2
- package/dist/{lib/core/base-provider.d.ts → core/baseProvider.d.ts} +1 -1
- package/dist/core/{base-provider.js → baseProvider.js} +2 -2
- package/dist/core/{dynamic-models.js → dynamicModels.js} +1 -1
- package/dist/core/evaluation.js +1 -1
- package/dist/core/factory.js +7 -5
- package/dist/core/types.d.ts +2 -2
- package/dist/{lib/factories/provider-registry.js → factories/providerRegistry.js} +11 -9
- package/dist/index.d.ts +4 -4
- package/dist/index.js +1 -1
- package/dist/{core/base-provider.d.ts → lib/core/baseProvider.d.ts} +1 -1
- package/dist/lib/core/{base-provider.js → baseProvider.js} +2 -2
- package/dist/lib/core/{dynamic-models.js → dynamicModels.js} +1 -1
- package/dist/lib/core/evaluation.js +1 -1
- package/dist/lib/core/factory.js +6 -3
- package/dist/lib/core/types.d.ts +2 -2
- package/dist/{factories/provider-registry.js → lib/factories/providerRegistry.js} +11 -9
- package/dist/lib/index.d.ts +4 -4
- package/dist/lib/index.js +1 -1
- package/dist/lib/mcp/factory.d.ts +2 -2
- package/dist/lib/mcp/factory.js +2 -2
- package/dist/lib/mcp/servers/agent/{direct-tools-server.js → directToolsServer.js} +1 -1
- package/dist/lib/mcp/servers/{ai-providers/ai-core-server.js → aiProviders/aiCoreServer.js} +1 -1
- package/dist/lib/mcp/{tool-registry.d.ts → toolRegistry.d.ts} +5 -0
- package/dist/lib/mcp/{tool-registry.js → toolRegistry.js} +60 -0
- package/dist/lib/neurolink.d.ts +4 -4
- package/dist/lib/neurolink.js +13 -8
- package/dist/lib/providers/{amazon-bedrock.d.ts → amazonBedrock.d.ts} +2 -2
- package/dist/lib/providers/{amazon-bedrock.js → amazonBedrock.js} +1 -1
- package/dist/lib/providers/anthropic.d.ts +2 -2
- package/dist/lib/providers/anthropic.js +1 -1
- package/dist/{providers/anthropic-baseprovider.d.ts → lib/providers/anthropicBaseProvider.d.ts} +2 -2
- package/dist/lib/providers/{anthropic-baseprovider.js → anthropicBaseProvider.js} +1 -1
- package/dist/{providers/azure-openai.d.ts → lib/providers/azureOpenai.d.ts} +2 -2
- package/dist/{providers/azure-openai.js → lib/providers/azureOpenai.js} +1 -1
- package/dist/{providers/google-ai-studio.d.ts → lib/providers/googleAiStudio.d.ts} +2 -2
- package/dist/lib/providers/{google-ai-studio.js → googleAiStudio.js} +1 -1
- package/dist/{providers/google-vertex.d.ts → lib/providers/googleVertex.d.ts} +2 -2
- package/dist/lib/providers/{google-vertex.js → googleVertex.js} +1 -1
- package/dist/lib/providers/huggingFace.d.ts +2 -2
- package/dist/lib/providers/huggingFace.js +1 -1
- package/dist/lib/providers/index.d.ts +4 -4
- package/dist/lib/providers/index.js +4 -4
- package/dist/lib/providers/mistral.d.ts +2 -2
- package/dist/lib/providers/mistral.js +1 -1
- package/dist/lib/providers/ollama.d.ts +2 -2
- package/dist/lib/providers/ollama.js +1 -1
- package/dist/lib/providers/openAI.d.ts +2 -2
- package/dist/lib/providers/openAI.js +1 -1
- package/dist/{sdk/tool-registration.d.ts → lib/sdk/toolRegistration.d.ts} +1 -1
- package/dist/lib/sdk/{tool-registration.js → toolRegistration.js} +17 -0
- package/dist/lib/telemetry/index.d.ts +2 -2
- package/dist/lib/telemetry/index.js +3 -3
- package/dist/lib/utils/logger.js +5 -6
- package/dist/lib/utils/providerConfig.js +1 -1
- package/dist/lib/utils/{provider-setup-messages.js → providerSetupMessages.js} +1 -1
- package/dist/lib/utils/providerUtils.js +10 -3
- package/dist/mcp/factory.d.ts +2 -2
- package/dist/mcp/factory.js +2 -2
- package/dist/mcp/servers/agent/{direct-tools-server.js → directToolsServer.js} +1 -1
- package/dist/mcp/servers/{ai-providers/ai-core-server.js → aiProviders/aiCoreServer.js} +2 -2
- package/dist/mcp/{tool-registry.d.ts → toolRegistry.d.ts} +5 -0
- package/dist/mcp/{tool-registry.js → toolRegistry.js} +60 -0
- package/dist/neurolink.d.ts +5 -5
- package/dist/neurolink.js +13 -8
- package/dist/providers/{amazon-bedrock.d.ts → amazonBedrock.d.ts} +2 -2
- package/dist/providers/{amazon-bedrock.js → amazonBedrock.js} +1 -1
- package/dist/providers/anthropic.d.ts +2 -2
- package/dist/providers/anthropic.js +1 -1
- package/dist/{lib/providers/anthropic-baseprovider.d.ts → providers/anthropicBaseProvider.d.ts} +2 -2
- package/dist/providers/{anthropic-baseprovider.js → anthropicBaseProvider.js} +1 -1
- package/dist/{lib/providers/azure-openai.d.ts → providers/azureOpenai.d.ts} +2 -2
- package/dist/{lib/providers/azure-openai.js → providers/azureOpenai.js} +1 -1
- package/dist/{lib/providers/google-ai-studio.d.ts → providers/googleAiStudio.d.ts} +2 -2
- package/dist/providers/{google-ai-studio.js → googleAiStudio.js} +2 -2
- package/dist/{lib/providers/google-vertex.d.ts → providers/googleVertex.d.ts} +2 -2
- package/dist/providers/{google-vertex.js → googleVertex.js} +1 -1
- package/dist/providers/huggingFace.d.ts +2 -2
- package/dist/providers/huggingFace.js +1 -1
- package/dist/providers/index.d.ts +4 -4
- package/dist/providers/index.js +4 -4
- package/dist/providers/mistral.d.ts +2 -2
- package/dist/providers/mistral.js +1 -1
- package/dist/providers/ollama.d.ts +2 -2
- package/dist/providers/ollama.js +1 -1
- package/dist/providers/openAI.d.ts +2 -2
- package/dist/providers/openAI.js +1 -1
- package/dist/{lib/sdk/tool-registration.d.ts → sdk/toolRegistration.d.ts} +1 -1
- package/dist/sdk/{tool-registration.js → toolRegistration.js} +17 -0
- package/dist/telemetry/index.d.ts +2 -2
- package/dist/telemetry/index.js +3 -3
- package/dist/utils/logger.js +5 -6
- package/dist/utils/providerConfig.js +1 -1
- package/dist/utils/{provider-setup-messages.js → providerSetupMessages.js} +1 -1
- package/dist/utils/providerUtils.js +10 -3
- package/package.json +23 -22
- package/dist/cli/factories/command-factory.d.ts +0 -19
- package/dist/cli/factories/command-factory.js +0 -184
- /package/dist/agent/{direct-tools.d.ts → directTools.d.ts} +0 -0
- /package/dist/agent/{direct-tools.js → directTools.js} +0 -0
- /package/dist/cli/utils/{env-manager.d.ts → envManager.d.ts} +0 -0
- /package/dist/cli/utils/{env-manager.js → envManager.js} +0 -0
- /package/dist/cli/utils/{interactive-setup.d.ts → interactiveSetup.d.ts} +0 -0
- /package/dist/cli/utils/{interactive-setup.js → interactiveSetup.js} +0 -0
- /package/dist/core/{dynamic-models.d.ts → dynamicModels.d.ts} +0 -0
- /package/dist/core/{evaluation-providers.d.ts → evaluationProviders.d.ts} +0 -0
- /package/dist/core/{evaluation-providers.js → evaluationProviders.js} +0 -0
- /package/dist/core/{service-registry.d.ts → serviceRegistry.d.ts} +0 -0
- /package/dist/core/{service-registry.js → serviceRegistry.js} +0 -0
- /package/dist/factories/{provider-factory.d.ts → providerFactory.d.ts} +0 -0
- /package/dist/factories/{provider-factory.js → providerFactory.js} +0 -0
- /package/dist/factories/{provider-registry.d.ts → providerRegistry.d.ts} +0 -0
- /package/dist/lib/agent/{direct-tools.d.ts → directTools.d.ts} +0 -0
- /package/dist/lib/agent/{direct-tools.js → directTools.js} +0 -0
- /package/dist/lib/core/{dynamic-models.d.ts → dynamicModels.d.ts} +0 -0
- /package/dist/lib/core/{evaluation-providers.d.ts → evaluationProviders.d.ts} +0 -0
- /package/dist/lib/core/{evaluation-providers.js → evaluationProviders.js} +0 -0
- /package/dist/lib/core/{service-registry.d.ts → serviceRegistry.d.ts} +0 -0
- /package/dist/lib/core/{service-registry.js → serviceRegistry.js} +0 -0
- /package/dist/lib/factories/{provider-factory.d.ts → providerFactory.d.ts} +0 -0
- /package/dist/lib/factories/{provider-factory.js → providerFactory.js} +0 -0
- /package/dist/lib/factories/{provider-registry.d.ts → providerRegistry.d.ts} +0 -0
- /package/dist/lib/mcp/servers/agent/{direct-tools-server.d.ts → directToolsServer.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-analysis-tools.d.ts → aiProviders/aiAnalysisTools.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-analysis-tools.js → aiProviders/aiAnalysisTools.js} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-core-server.d.ts → aiProviders/aiCoreServer.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-workflow-tools.d.ts → aiProviders/aiWorkflowTools.d.ts} +0 -0
- /package/dist/lib/mcp/servers/{ai-providers/ai-workflow-tools.js → aiProviders/aiWorkflowTools.js} +0 -0
- /package/dist/lib/mcp/servers/utilities/{utility-server.d.ts → utilityServer.d.ts} +0 -0
- /package/dist/lib/mcp/servers/utilities/{utility-server.js → utilityServer.js} +0 -0
- /package/dist/lib/proxy/{proxy-fetch.d.ts → proxyFetch.d.ts} +0 -0
- /package/dist/lib/proxy/{proxy-fetch.js → proxyFetch.js} +0 -0
- /package/dist/lib/telemetry/{telemetry-service.d.ts → telemetryService.d.ts} +0 -0
- /package/dist/lib/telemetry/{telemetry-service.js → telemetryService.js} +0 -0
- /package/dist/lib/types/{generate-types.d.ts → generateTypes.d.ts} +0 -0
- /package/dist/lib/types/{generate-types.js → generateTypes.js} +0 -0
- /package/dist/lib/types/{mcp-types.d.ts → mcpTypes.d.ts} +0 -0
- /package/dist/lib/types/{mcp-types.js → mcpTypes.js} +0 -0
- /package/dist/lib/types/{stream-types.d.ts → streamTypes.d.ts} +0 -0
- /package/dist/lib/types/{stream-types.js → streamTypes.js} +0 -0
- /package/dist/lib/types/{universal-provider-options.d.ts → universalProviderOptions.d.ts} +0 -0
- /package/dist/lib/types/{universal-provider-options.js → universalProviderOptions.js} +0 -0
- /package/dist/lib/utils/{provider-setup-messages.d.ts → providerSetupMessages.d.ts} +0 -0
- /package/dist/mcp/servers/agent/{direct-tools-server.d.ts → directToolsServer.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-analysis-tools.d.ts → aiProviders/aiAnalysisTools.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-analysis-tools.js → aiProviders/aiAnalysisTools.js} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-core-server.d.ts → aiProviders/aiCoreServer.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-workflow-tools.d.ts → aiProviders/aiWorkflowTools.d.ts} +0 -0
- /package/dist/mcp/servers/{ai-providers/ai-workflow-tools.js → aiProviders/aiWorkflowTools.js} +0 -0
- /package/dist/mcp/servers/utilities/{utility-server.d.ts → utilityServer.d.ts} +0 -0
- /package/dist/mcp/servers/utilities/{utility-server.js → utilityServer.js} +0 -0
- /package/dist/proxy/{proxy-fetch.d.ts → proxyFetch.d.ts} +0 -0
- /package/dist/proxy/{proxy-fetch.js → proxyFetch.js} +0 -0
- /package/dist/telemetry/{telemetry-service.d.ts → telemetryService.d.ts} +0 -0
- /package/dist/telemetry/{telemetry-service.js → telemetryService.js} +0 -0
- /package/dist/types/{generate-types.d.ts → generateTypes.d.ts} +0 -0
- /package/dist/types/{generate-types.js → generateTypes.js} +0 -0
- /package/dist/types/{mcp-types.d.ts → mcpTypes.d.ts} +0 -0
- /package/dist/types/{mcp-types.js → mcpTypes.js} +0 -0
- /package/dist/types/{stream-types.d.ts → streamTypes.d.ts} +0 -0
- /package/dist/types/{stream-types.js → streamTypes.js} +0 -0
- /package/dist/types/{universal-provider-options.d.ts → universalProviderOptions.d.ts} +0 -0
- /package/dist/types/{universal-provider-options.js → universalProviderOptions.js} +0 -0
- /package/dist/utils/{provider-setup-messages.d.ts → providerSetupMessages.d.ts} +0 -0
package/dist/cli/factories/commandFactory.js
@@ -0,0 +1,633 @@
import { NeuroLink } from "../../lib/neurolink.js";
import ora from "ora";
import chalk from "chalk";
import { logger } from "../../lib/utils/logger.js";
import fs from "fs";
/**
 * CLI Command Factory for generate commands
 */
export class CLICommandFactory {
    // Common options available on all commands
    static commonOptions = {
        // Core generation options
        provider: {
            choices: [
                "auto",
                "openai",
                "bedrock",
                "vertex",
                "googleVertex",
                "anthropic",
                "azure",
                "google-ai",
                "huggingface",
                "ollama",
                "mistral",
            ],
            default: "auto",
            description: "AI provider to use (auto-selects best available)",
        },
        model: {
            type: "string",
            description: "Specific model to use (e.g. gemini-2.5-pro, gemini-2.5-flash)",
        },
        temperature: {
            type: "number",
            default: 0.7,
            description: "Creativity level (0.0 = focused, 1.0 = creative)",
        },
        maxTokens: {
            type: "number",
            default: 1000,
            description: "Maximum tokens to generate",
        },
        system: {
            type: "string",
            description: "System prompt to guide AI behavior",
        },
        // Output control options
        format: {
            choices: ["text", "json", "table"],
            default: "text",
            alias: ["f", "output-format"],
            description: "Output format",
        },
        output: {
            type: "string",
            description: "Save output to file",
        },
        // Behavior control options
        timeout: {
            type: "number",
            default: 120,
            description: "Maximum execution time in seconds",
        },
        delay: {
            type: "number",
            description: "Delay between operations (ms)",
        },
        // Tools & features options
        disableTools: {
            type: "boolean",
            default: false,
            description: "Disable MCP tool integration (tools enabled by default)",
        },
        enableAnalytics: {
            type: "boolean",
            default: false,
            description: "Enable usage analytics collection",
        },
        enableEvaluation: {
            type: "boolean",
            default: false,
            description: "Enable AI response quality evaluation",
        },
        evaluationDomain: {
            type: "string",
            description: "Domain expertise for evaluation (e.g., 'AI coding assistant', 'Customer service expert')",
        },
        toolUsageContext: {
            type: "string",
            description: "Tool usage context for evaluation (e.g., 'Used sales-data MCP tools')",
        },
        lighthouseStyle: {
            type: "boolean",
            default: false,
            description: "Use Lighthouse-compatible domain-aware evaluation",
        },
        context: {
            type: "string",
            description: "JSON context object for custom data",
        },
        // Debug & output options
        debug: {
            type: "boolean",
            alias: ["v", "verbose"],
            default: false,
            description: "Enable debug mode with verbose output",
        },
        quiet: {
            type: "boolean",
            alias: "q",
            default: false,
            description: "Suppress non-essential output",
        },
    };
    // Helper method to build options for commands
    static buildOptions(yargs, additionalOptions = {}) {
        return yargs.options({
            ...this.commonOptions,
            ...additionalOptions,
        });
    }
    // Helper method to process common options
    static processOptions(argv) {
        return {
            provider: argv.provider === "auto" ? undefined : argv.provider,
            model: argv.model,
            temperature: argv.temperature,
            maxTokens: argv.maxTokens,
            systemPrompt: argv.system,
            timeout: argv.timeout,
            disableTools: argv.disableTools,
            enableAnalytics: argv.enableAnalytics,
            enableEvaluation: argv.enableEvaluation,
            evaluationDomain: argv.evaluationDomain,
            toolUsageContext: argv.toolUsageContext,
            lighthouseStyle: argv.lighthouseStyle,
            context: argv.context
                ? typeof argv.context === "string"
                    ? JSON.parse(argv.context)
                    : argv.context
                : undefined,
            debug: argv.debug,
            quiet: argv.quiet,
            format: argv.format,
            output: argv.output,
            delay: argv.delay,
        };
    }
    // Helper method to handle output
    static handleOutput(result, options) {
        let output;
        if (options.format === "json") {
            output = JSON.stringify(result, null, 2);
        }
        else if (options.format === "table" && Array.isArray(result)) {
            console.table(result);
            return;
        }
        else {
            if (typeof result === "string") {
                output = result;
            }
            else if (result && typeof result === "object" && "content" in result) {
                output = result.content;
            }
            else if (result && typeof result === "object" && "text" in result) {
                output = result.text;
            }
            else {
                output = JSON.stringify(result);
            }
        }
        if (options.output) {
            fs.writeFileSync(options.output, output);
            if (!options.quiet) {
                console.log(`Output saved to ${options.output}`);
            }
        }
        else {
            console.log(output);
        }
    }
    /**
     * Create the new primary 'generate' command
     */
    static createGenerateCommand() {
        return {
            command: ["generate <input>", "gen <input>"],
            describe: "Generate content using AI providers",
            builder: (yargs) => {
                return this.buildOptions(yargs.positional("input", {
                    type: "string",
                    description: "Text prompt for AI generation (or read from stdin)",
                }));
            },
            handler: async (argv) => await this.executeGenerate(argv),
        };
    }
    /**
     * Create stream command
     */
    static createStreamCommand() {
        return {
            command: "stream <input>",
            describe: "Stream generation in real-time",
            builder: (yargs) => {
                return this.buildOptions(yargs.positional("input", {
                    type: "string",
                    description: "Text prompt for streaming (or read from stdin)",
                }));
            },
            handler: async (argv) => await this.executeStream(argv),
        };
    }
    /**
     * Create batch command
     */
    static createBatchCommand() {
        return {
            command: "batch <file>",
            describe: "Process multiple prompts from a file",
            builder: (yargs) => {
                return this.buildOptions(yargs.positional("file", {
                    type: "string",
                    description: "File with prompts (one per line)",
                    demandOption: true,
                }));
            },
            handler: async (argv) => await this.executeBatch(argv),
        };
    }
    /**
     * Create provider commands
     */
    static createProviderCommands() {
        return {
            command: "provider <subcommand>",
            describe: "Manage AI provider configurations and status",
            builder: (yargs) => {
                return yargs
                    .command("status", "Check status of all configured AI providers", (y) => this.buildOptions(y), (argv) => CLICommandFactory.executeProviderStatus(argv))
                    .demandCommand(1, "");
            },
            handler: () => { }, // No-op handler as subcommands handle everything
        };
    }
    /**
     * Create status command (alias for provider status)
     */
    static createStatusCommand() {
        return {
            command: "status",
            describe: "Check AI provider connectivity and performance (alias for provider status)",
            builder: (yargs) => this.buildOptions(yargs),
            handler: async (argv) => await CLICommandFactory.executeProviderStatus(argv),
        };
    }
    /**
     * Create config commands
     */
    static createConfigCommands() {
        return {
            command: "config <subcommand>",
            describe: "Manage NeuroLink configuration",
            builder: (yargs) => {
                return yargs
                    .command("export", "Export current configuration", (y) => this.buildOptions(y), (argv) => this.executeConfigExport(argv))
                    .demandCommand(1, "");
            },
            handler: () => { }, // No-op handler as subcommands handle everything
        };
    }
    /**
     * Create get-best-provider command
     */
    static createBestProviderCommand() {
        return {
            command: "get-best-provider",
            describe: "Show the best available AI provider",
            builder: (yargs) => this.buildOptions(yargs),
            handler: async (argv) => await this.executeGetBestProvider(argv),
        };
    }
    /**
     * Create completion command
     */
    static createCompletionCommand() {
        return {
            command: "completion",
            describe: "Generate shell completion script",
            builder: (yargs) => this.buildOptions(yargs),
            handler: async (argv) => await this.executeCompletion(argv),
        };
    }
    /**
     * Execute provider status command
     */
    static async executeProviderStatus(argv) {
        if (argv.verbose && !argv.quiet) {
            console.log(chalk.yellow("ℹ️ Verbose mode enabled. Displaying detailed status.\n"));
        }
        const spinner = argv.quiet
            ? null
            : ora("🔍 Checking AI provider status...\n").start();
        try {
            // Use SDK's provider diagnostic method instead of manual testing
            const sdk = new NeuroLink();
            const results = await sdk.getProviderStatus();
            if (spinner) {
                const working = results.filter((r) => r.status === "working").length;
                const configured = results.filter((r) => r.configured).length;
                spinner.succeed(`Provider check complete: ${working}/${configured} providers working`);
            }
            // Display results
            for (const result of results) {
                const status = result.status === "working"
                    ? chalk.green("✅ Working")
                    : result.status === "failed"
                        ? chalk.red("❌ Failed")
                        : chalk.gray("⚪ Not configured");
                const time = result.responseTime ? ` (${result.responseTime}ms)` : "";
                const model = result.model ? ` [${result.model}]` : "";
                console.log(`${result.provider}: ${status}${time}${model}`);
                if (argv.verbose && result.error) {
                    console.log(` Error: ${chalk.red(result.error)}`);
                }
            }
            if (argv.verbose && !argv.quiet) {
                console.log(chalk.blue("\n📋 Detailed Results:"));
                console.log(JSON.stringify(results, null, 2));
            }
        }
        catch (error) {
            if (spinner) {
                spinner.fail("Provider status check failed");
            }
            console.error(chalk.red("Error checking provider status:"), error);
            process.exit(1);
        }
    }
    /**
     * Execute the generate command
     */
    static async executeGenerate(argv) {
        // Handle stdin input if no input provided
        if (!argv.input && !process.stdin.isTTY) {
            let stdinData = "";
            process.stdin.setEncoding("utf8");
            for await (const chunk of process.stdin) {
                stdinData += chunk;
            }
            argv.input = stdinData.trim();
            if (!argv.input) {
                throw new Error("No input received from stdin");
            }
        }
        else if (!argv.input) {
            throw new Error('Input required. Use: neurolink generate "your prompt" or echo "prompt" | neurolink generate');
        }
        const options = this.processOptions(argv);
        const spinner = argv.quiet ? null : ora("🤖 Generating text...").start();
        try {
            // Add delay if specified
            if (options.delay) {
                await new Promise((resolve) => setTimeout(resolve, options.delay));
            }
            const sdk = new NeuroLink();
            const result = await sdk.generate({
                input: { text: argv.input },
                provider: options.provider,
                model: options.model,
                temperature: options.temperature,
                maxTokens: options.maxTokens,
                systemPrompt: options.systemPrompt,
                timeout: options.timeout,
                disableTools: options.disableTools,
                enableAnalytics: options.enableAnalytics,
                enableEvaluation: options.enableEvaluation,
                evaluationDomain: options.evaluationDomain,
                toolUsageContext: options.toolUsageContext,
                context: options.context,
            });
            if (spinner) {
                spinner.succeed(chalk.green("✅ Text generated successfully!"));
            }
            // Handle output with universal formatting
            this.handleOutput(result, options);
            if (options.debug) {
                logger.debug("\n" + chalk.yellow("Debug Information:"));
                logger.debug("Provider:", result.provider);
                logger.debug("Model:", result.model);
                if (result.analytics) {
                    logger.debug("Analytics:", JSON.stringify(result.analytics, null, 2));
                }
                if (result.evaluation) {
                    logger.debug("Evaluation:", JSON.stringify(result.evaluation, null, 2));
                }
            }
            process.exit(0);
        }
        catch (error) {
            if (spinner) {
                spinner.fail();
            }
            console.error(chalk.red(`❌ Generation failed: ${error.message}`));
            if (options.debug) {
                console.error(chalk.gray(error.stack));
            }
            process.exit(1);
        }
    }
    /**
     * Execute the stream command
     */
    static async executeStream(argv) {
        // Handle stdin input if no input provided
        if (!argv.input && !process.stdin.isTTY) {
            let stdinData = "";
            process.stdin.setEncoding("utf8");
            for await (const chunk of process.stdin) {
                stdinData += chunk;
            }
            argv.input = stdinData.trim();
            if (!argv.input) {
                throw new Error("No input received from stdin");
            }
        }
        else if (!argv.input) {
            throw new Error('Input required. Use: neurolink stream "your prompt" or echo "prompt" | neurolink stream');
        }
        const options = this.processOptions(argv);
        if (!options.quiet) {
            console.log(chalk.blue("🔄 Streaming..."));
        }
        try {
            // Add delay if specified
            if (options.delay) {
                await new Promise((resolve) => setTimeout(resolve, options.delay));
            }
            const sdk = new NeuroLink();
            const stream = await sdk.stream({
                input: { text: argv.input },
                provider: options.provider,
                model: options.model,
                temperature: options.temperature,
                maxTokens: options.maxTokens,
                systemPrompt: options.systemPrompt,
                timeout: options.timeout,
                disableTools: options.disableTools,
                enableAnalytics: options.enableAnalytics,
                enableEvaluation: options.enableEvaluation,
                context: options.context,
            });
            let fullContent = "";
            // Process the stream
            for await (const chunk of stream.stream) {
                if (options.delay && options.delay > 0) {
                    // Demo mode - add delay between chunks
                    await new Promise((resolve) => setTimeout(resolve, options.delay));
                }
                process.stdout.write(chunk.content);
                fullContent += chunk.content;
            }
            if (!options.quiet) {
                process.stdout.write("\n");
            }
            // Handle output file if specified
            if (options.output) {
                fs.writeFileSync(options.output, fullContent);
                if (!options.quiet) {
                    console.log(`\nOutput saved to ${options.output}`);
                }
            }
            process.exit(0);
        }
        catch (error) {
            console.error(chalk.red(`❌ Streaming failed: ${error.message}`));
            if (options.debug) {
                console.error(chalk.gray(error.stack));
            }
            process.exit(1);
        }
    }
    /**
     * Execute the batch command
     */
    static async executeBatch(argv) {
        const options = this.processOptions(argv);
        const spinner = options.quiet ? null : ora().start();
        try {
            if (!argv.file) {
                throw new Error("No file specified");
            }
            if (!fs.existsSync(argv.file)) {
                throw new Error(`File not found: ${argv.file}`);
            }
            const buffer = fs.readFileSync(argv.file);
            const prompts = buffer
                .toString("utf8")
                .split("\n")
                .map((line) => line.trim())
                .filter(Boolean);
            if (prompts.length === 0) {
                throw new Error("No prompts found in file");
            }
            if (spinner) {
                spinner.text = `📦 Processing ${prompts.length} prompts...`;
            }
            else if (!options.quiet) {
                console.log(chalk.blue(`📦 Processing ${prompts.length} prompts...\n`));
            }
            const results = [];
            const sdk = new NeuroLink();
            for (let i = 0; i < prompts.length; i++) {
                if (spinner) {
                    spinner.text = `Processing ${i + 1}/${prompts.length}: ${prompts[i].substring(0, 30)}...`;
                }
                try {
                    const result = await sdk.generate({
                        input: { text: prompts[i] },
                        provider: options.provider,
                        model: options.model,
                        temperature: options.temperature,
                        maxTokens: options.maxTokens,
                        systemPrompt: options.systemPrompt,
                        timeout: options.timeout,
                        disableTools: options.disableTools,
                        enableAnalytics: options.enableAnalytics,
                        enableEvaluation: options.enableEvaluation,
                        context: options.context,
                    });
                    results.push({ prompt: prompts[i], response: result.content });
                    if (spinner) {
                        spinner.render();
                    }
                }
                catch (error) {
                    results.push({
                        prompt: prompts[i],
                        error: error.message,
                    });
                    if (spinner) {
                        spinner.render();
                    }
                }
                // Add delay between requests
                if (i < prompts.length - 1) {
                    await new Promise((resolve) => setTimeout(resolve, options.delay || 1000));
                }
            }
            if (spinner) {
                spinner.succeed(chalk.green("✅ Batch processing complete!"));
            }
            // Handle output with universal formatting
            this.handleOutput(results, options);
            process.exit(0);
        }
        catch (error) {
            if (spinner) {
                spinner.fail();
            }
            console.error(chalk.red(`❌ Batch processing failed: ${error.message}`));
            if (options.debug) {
                console.error(chalk.gray(error.stack));
            }
            process.exit(1);
        }
    }
    /**
     * Execute config export command
     */
    static async executeConfigExport(argv) {
        const options = this.processOptions(argv);
        try {
            const config = {
                providers: {
                    openai: !!process.env.OPENAI_API_KEY,
                    bedrock: !!(process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY),
                    vertex: !!(process.env.GOOGLE_APPLICATION_CREDENTIALS ||
                        process.env.GOOGLE_SERVICE_ACCOUNT_KEY),
                    anthropic: !!process.env.ANTHROPIC_API_KEY,
                    azure: !!(process.env.AZURE_OPENAI_API_KEY &&
                        process.env.AZURE_OPENAI_ENDPOINT),
                    "google-ai": !!process.env.GOOGLE_AI_API_KEY,
                },
                defaults: {
                    temperature: 0.7,
                    maxTokens: 500,
                },
                timestamp: new Date().toISOString(),
            };
            this.handleOutput(config, options);
        }
        catch (error) {
            console.error(chalk.red(`❌ Configuration export failed: ${error.message}`));
            process.exit(1);
        }
    }
    /**
     * Execute get best provider command
     */
    static async executeGetBestProvider(argv) {
        const options = this.processOptions(argv);
        try {
            const { getBestProvider } = await import("../../lib/utils/providerUtils.js");
            const bestProvider = await getBestProvider();
            if (options.format === "json") {
                this.handleOutput({ provider: bestProvider }, options);
            }
            else {
                if (!options.quiet) {
                    console.log(chalk.green(`🎯 Best available provider: ${bestProvider}`));
                }
                else {
                    this.handleOutput(bestProvider, options);
                }
            }
        }
        catch (error) {
            console.error(chalk.red(`❌ Provider selection failed: ${error.message}`));
            process.exit(1);
        }
    }
    /**
     * Execute completion command
     */
    static async executeCompletion(argv) {
        // This would need to be implemented with the actual CLI instance
        console.log("# Completion script would be generated here");
        console.log("# This requires access to the yargs CLI instance");
    }
}