cmdvault 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,314 @@
1
+ const https = require('https');
2
+ const OpenAI = require('openai');
3
+ const { GoogleGenerativeAI } = require('@google/generative-ai');
4
+
5
// Catalog of supported AI providers. `endpoint`/`path` describe the HTTPS API
// used by the matching call* helper below; `models` lists model ids offered
// to the user.
const SUPPORTED_PROVIDERS = {
  'openai': {
    name: 'OpenAI (GPT-4, GPT-3.5)',
    endpoint: 'api.openai.com',
    path: '/v1/chat/completions',
    models: ['gpt-4', 'gpt-3.5-turbo']
  },
  'anthropic': {
    name: 'Anthropic (Claude)',
    endpoint: 'api.anthropic.com',
    path: '/v1/messages',
    models: ['claude-3-5-sonnet-20241022', 'claude-3-opus-20240229']
  },
  'gemini': {
    name: 'Google Gemini',
    endpoint: 'generativelanguage.googleapis.com',
    path: '/v1/models',
    // 'gemini-2.0-flash' is the default the gemini code path actually uses,
    // so the advertised list must include it.
    models: ['gemini-2.0-flash', 'gemini-1.5-flash', 'gemini-1.5-pro']
  },
  'groq': {
    name: 'Groq (Fast Inference)',
    endpoint: 'api.groq.com',
    path: '/openai/v1/chat/completions',
    models: ['llama-3.1-70b-versatile', 'mixtral-8x7b-32768']
  },
  'openrouter': {
    name: 'OpenRouter (Multiple Models)',
    endpoint: 'openrouter.ai',
    path: '/api/v1/chat/completions',
    models: ['openai/gpt-4', 'anthropic/claude-3-opus']
  }
};
37
+
38
/**
 * Expose the provider catalog.
 * @returns {object} map of provider id -> { name, endpoint, path, models }.
 *   Note: this is the live object, not a defensive copy.
 */
function getSupportedProviders() {
  return SUPPORTED_PROVIDERS;
}
41
+
42
/**
 * Build the LLM prompt asking for one directly runnable shell command.
 * @param {string} userInput - Natural-language description of the command.
 * @returns {string} prompt text tailored to the current OS shell.
 */
function generateCommandPrompt(userInput) {
  const { platform } = process;
  const isWindows = platform === 'win32';
  const shell = isWindows ? 'Windows Command Prompt/PowerShell' : 'Unix Shell (bash/zsh)';

  return `You are a shell command expert. Generate a single, executable shell command for ${shell} based on the user's request.

User request: "${userInput}"

Rules:
1. Return ONLY the command, no explanations or markdown
2. Make it safe and commonly used
3. Use standard tools available on ${platform}
4. If multiple commands needed, chain them with && or ;
5. Do not include any backticks, quotes around the entire command, or extra text

Command:`;
}
59
+
60
/**
 * Generate a shell command via the OpenAI chat completions SDK.
 * @param {string} apiKey - OpenAI API key.
 * @param {string} prompt - Full prompt describing the desired command.
 * @param {string} [model='gpt-3.5-turbo'] - Chat model id.
 * @returns {Promise<string>} the trimmed command text
 * @throws {Error} wrapping any SDK/API failure, or when the API returns
 *   no usable completion.
 */
async function callOpenAI(apiKey, prompt, model = 'gpt-3.5-turbo') {
  try {
    const client = new OpenAI({ apiKey });

    const response = await client.chat.completions.create({
      model,
      messages: [
        {
          role: 'system',
          content: 'You are a shell command generator. Return only the command, nothing else.'
        },
        {
          role: 'user',
          content: prompt
        }
      ],
      temperature: 0.3,
      max_tokens: 150
    });

    // Guard against an empty choices array / missing content so callers get a
    // clear message instead of an opaque TypeError.
    const content = response.choices?.[0]?.message?.content;
    if (typeof content !== 'string') {
      throw new Error('OpenAI returned an empty response');
    }
    return content.trim();
  } catch (error) {
    throw new Error(error.message || 'OpenAI API error');
  }
}
88
+
89
/**
 * Generate a shell command via the Google Gemini SDK.
 * @param {string} apiKey - Google Generative AI API key.
 * @param {string} prompt - User prompt describing the desired command.
 * @param {string} [model='gemini-2.0-flash'] - Gemini model id.
 * @returns {Promise<string>} the trimmed command text
 * @throws {Error} wrapping any SDK/API failure.
 */
async function callGemini(apiKey, prompt, model = 'gemini-2.0-flash') {
  try {
    const client = new GoogleGenerativeAI(apiKey);
    const generativeModel = client.getGenerativeModel({ model });

    const fullPrompt = `You are a shell command generator. Return only the command, nothing else.

User request: ${prompt}

Command:`;

    const result = await generativeModel.generateContent(fullPrompt);
    const response = await result.response;
    return response.text().trim();
  } catch (error) {
    throw new Error(error.message || 'Gemini API error');
  }
}
109
+
110
/**
 * Generate a shell command via the Anthropic Messages API over raw HTTPS.
 * @param {string} apiKey - Anthropic API key (sent as x-api-key).
 * @param {string} prompt - User prompt describing the desired command.
 * @param {string} [model='claude-3-5-sonnet-20241022'] - Claude model id.
 * @returns {Promise<string>} the trimmed command text
 * @throws {Error} on network failure, API-reported error, or unparseable body.
 */
async function callAnthropic(apiKey, prompt, model = 'claude-3-5-sonnet-20241022') {
  const data = JSON.stringify({
    model,
    max_tokens: 150,
    messages: [
      {
        role: 'user',
        content: prompt
      }
    ]
  });

  return new Promise((resolve, reject) => {
    const options = {
      hostname: 'api.anthropic.com',
      path: '/v1/messages',
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': apiKey,
        'anthropic-version': '2023-06-01',
        // data.length counts UTF-16 code units; any multi-byte character in
        // the prompt would make it undercount the body and truncate the
        // request. byteLength is the actual wire size.
        'Content-Length': Buffer.byteLength(data)
      }
    };

    const req = https.request(options, (res) => {
      let body = '';

      res.on('data', (chunk) => {
        body += chunk;
      });

      res.on('end', () => {
        try {
          const response = JSON.parse(body);

          // The API signals failures in-band via an `error` object.
          if (response.error) {
            reject(new Error(response.error.message || 'Anthropic API error'));
            return;
          }

          resolve(response.content[0].text.trim());
        } catch (error) {
          reject(new Error(`Failed to parse response: ${error.message}`));
        }
      });
    });

    req.on('error', reject);

    req.write(data);
    req.end();
  });
}
167
+
168
/**
 * Generate a shell command via Groq's OpenAI-compatible chat endpoint.
 * @param {string} apiKey - Groq API key (sent as a Bearer token).
 * @param {string} prompt - User prompt describing the desired command.
 * @param {string} [model='llama-3.1-70b-versatile'] - Groq model id.
 * @returns {Promise<string>} the trimmed command text
 * @throws {Error} on network failure, API-reported error, or unparseable body.
 */
async function callGroq(apiKey, prompt, model = 'llama-3.1-70b-versatile') {
  const data = JSON.stringify({
    model,
    messages: [
      {
        role: 'system',
        content: 'You are a shell command generator. Return only the command, nothing else.'
      },
      {
        role: 'user',
        content: prompt
      }
    ],
    temperature: 0.3,
    max_tokens: 150
  });

  return new Promise((resolve, reject) => {
    const options = {
      hostname: 'api.groq.com',
      path: '/openai/v1/chat/completions',
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
        // data.length counts UTF-16 code units; multi-byte characters in the
        // prompt would undercount the body and truncate the request.
        'Content-Length': Buffer.byteLength(data)
      }
    };

    const req = https.request(options, (res) => {
      let body = '';

      res.on('data', (chunk) => {
        body += chunk;
      });

      res.on('end', () => {
        try {
          const response = JSON.parse(body);

          if (response.error) {
            reject(new Error(response.error.message || 'Groq API error'));
            return;
          }

          resolve(response.choices[0].message.content.trim());
        } catch (error) {
          reject(new Error(`Failed to parse response: ${error.message}`));
        }
      });
    });

    req.on('error', reject);

    req.write(data);
    req.end();
  });
}
229
+
230
/**
 * Generate a shell command via OpenRouter's OpenAI-compatible endpoint.
 * @param {string} apiKey - OpenRouter API key (sent as a Bearer token).
 * @param {string} prompt - User prompt describing the desired command.
 * @param {string} [model='openai/gpt-3.5-turbo'] - Routed model id.
 * @returns {Promise<string>} the trimmed command text
 * @throws {Error} on network failure, API-reported error, or unparseable body.
 */
async function callOpenRouter(apiKey, prompt, model = 'openai/gpt-3.5-turbo') {
  const data = JSON.stringify({
    model,
    messages: [
      {
        role: 'system',
        content: 'You are a shell command generator. Return only the command, nothing else.'
      },
      {
        role: 'user',
        content: prompt
      }
    ],
    temperature: 0.3,
    max_tokens: 150
  });

  return new Promise((resolve, reject) => {
    const options = {
      hostname: 'openrouter.ai',
      path: '/api/v1/chat/completions',
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
        // data.length counts UTF-16 code units; multi-byte characters in the
        // prompt would undercount the body and truncate the request.
        'Content-Length': Buffer.byteLength(data)
      }
    };

    const req = https.request(options, (res) => {
      let body = '';

      res.on('data', (chunk) => {
        body += chunk;
      });

      res.on('end', () => {
        try {
          const response = JSON.parse(body);

          if (response.error) {
            reject(new Error(response.error.message || 'OpenRouter API error'));
            return;
          }

          resolve(response.choices[0].message.content.trim());
        } catch (error) {
          reject(new Error(`Failed to parse response: ${error.message}`));
        }
      });
    });

    req.on('error', reject);

    req.write(data);
    req.end();
  });
}
291
+
292
/**
 * Route a command-generation request to the configured AI provider.
 * @param {string} providerName - Case-insensitive provider id (e.g. 'openai').
 * @param {string} apiKey - API key for that provider.
 * @param {string} userInput - Natural-language description of the command.
 * @param {?string} [model=null] - Optional model override; each provider has
 *   its own default when null.
 * @returns {Promise<string>} the generated shell command
 * @throws {Error} when the provider id is not recognized.
 */
async function generateCommand(providerName, apiKey, userInput, model = null) {
  const prompt = generateCommandPrompt(userInput);
  const provider = providerName.toLowerCase();

  const dispatch = {
    openai: () => callOpenAI(apiKey, prompt, model || 'gpt-3.5-turbo'),
    anthropic: () => callAnthropic(apiKey, prompt, model || 'claude-3-5-sonnet-20241022'),
    gemini: () => callGemini(apiKey, prompt, model || 'gemini-2.0-flash'),
    groq: () => callGroq(apiKey, prompt, model || 'llama-3.1-70b-versatile'),
    openrouter: () => callOpenRouter(apiKey, prompt, model || 'openai/gpt-3.5-turbo')
  };

  // Own-property check so inherited names ('toString', ...) still throw,
  // matching the behavior of the unknown-provider case.
  if (!Object.prototype.hasOwnProperty.call(dispatch, provider)) {
    throw new Error(`Unsupported provider: ${providerName}`);
  }
  return dispatch[provider]();
}
310
+
311
// Public surface of this module: the provider catalog accessor and the
// provider-dispatching command generator.
module.exports = {
  getSupportedProviders,
  generateCommand
};
package/lib/editor.js ADDED
@@ -0,0 +1,52 @@
1
+ const { spawnSync } = require('child_process');
2
+ const fs = require('fs');
3
+ const path = require('path');
4
+ const os = require('os');
5
+
6
/**
 * Resolve which editor command to launch: the user's $EDITOR, then $VISUAL,
 * then a platform-specific fallback.
 * @returns {string} editor executable name
 */
function getEditor() {
  const { EDITOR, VISUAL } = process.env;
  return EDITOR || VISUAL || getDefaultEditor();
}
9
+
10
/**
 * Platform-default editor when neither $EDITOR nor $VISUAL is set.
 * @returns {string} 'notepad' on Windows, 'nano' everywhere else.
 */
function getDefaultEditor() {
  // The original darwin and linux branches both returned 'nano',
  // so a single non-Windows fallback is equivalent.
  return process.platform === 'win32' ? 'notepad' : 'nano';
}
21
+
22
/**
 * Open `content` in the user's editor and return the edited text.
 * Blocks until the editor process exits.
 * @param {string} content - Initial text shown in the editor.
 * @returns {string} the edited file content, trimmed
 * @throws {Error} if the editor fails to launch or the temp file cannot
 *   be written/read.
 */
function openEditor(content) {
  // mkdtempSync gives a unique, private directory per call; the previous
  // `cm-edit-${Date.now()}.txt` name in the shared tmpdir was predictable
  // and could collide across concurrent invocations.
  const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'cm-edit-'));
  const tmpFile = path.join(tmpDir, 'edit.txt');

  try {
    fs.writeFileSync(tmpFile, content);

    const editor = getEditor();
    const result = spawnSync(editor, [tmpFile], {
      stdio: 'inherit'
    });

    if (result.error) {
      throw result.error;
    }

    return fs.readFileSync(tmpFile, 'utf8').trim();
  } finally {
    // Best-effort cleanup; never mask the primary error with a cleanup one.
    try {
      fs.unlinkSync(tmpFile);
      fs.rmdirSync(tmpDir);
    } catch (cleanupError) {
      // ignore — leftover temp dirs are reclaimed by the OS
    }
  }
}
49
+
50
// Only openEditor is public; editor discovery stays internal to this module.
module.exports = {
  openEditor
};
@@ -0,0 +1,37 @@
1
+ const { spawn } = require('child_process');
2
+ const chalk = require('chalk');
3
+
4
/**
 * Run a shell command string, inheriting the parent's stdio.
 * @param {string} command - Raw command line; executed via `cmd.exe /c` on
 *   Windows and `/bin/sh -c` elsewhere. The string is intentionally passed
 *   to a shell unescaped — callers must treat it as trusted.
 * @param {{cwd?: string}} [options] - Optional working directory override.
 * @returns {Promise<number>} resolves with 0 on success
 * @throws rejects when the shell cannot be spawned, the command exits
 *   non-zero, or the process is killed by a signal.
 */
function executeCommand(command, options = {}) {
  return new Promise((resolve, reject) => {
    const isWindows = process.platform === 'win32';
    const shell = isWindows ? 'cmd.exe' : '/bin/sh';
    const shellFlag = isWindows ? '/c' : '-c';

    const child = spawn(shell, [shellFlag, command], {
      stdio: 'inherit',
      cwd: options.cwd || process.cwd(),
      env: process.env
    });

    child.on('error', reject);

    child.on('close', (code, signal) => {
      if (code === 0) {
        resolve(code);
      } else if (signal) {
        // code is null when the child was killed; report the signal instead
        // of the misleading "exited with code null".
        reject(new Error(`Command terminated by signal ${signal}`));
      } else {
        reject(new Error(`Command exited with code ${code}`));
      }
    });
  });
}
29
+
30
/**
 * Print a cyan confirmation line naming the command that was run.
 * @param {string} command - The command line that was executed.
 */
function printExecutionMessage(command) {
  const message = `[CM] Executed: ${command}`;
  console.log(chalk.cyan(message));
}
33
+
34
// Public API: run a shell command and report what was run.
module.exports = {
  executeCommand,
  printExecutionMessage
};
package/lib/storage.js ADDED
@@ -0,0 +1,73 @@
1
+ const fs = require('fs');
2
+ const path = require('path');
3
+ const os = require('os');
4
+
5
// Root directory for all persisted CmdVault state, under the user's home.
const KI_DIR = path.join(os.homedir(), '.cmvault');
// JSON file holding the saved commands as a flat key -> command map.
const COMMANDS_FILE = path.join(KI_DIR, 'commands.json');
7
+
8
/**
 * Create the storage directory if it does not already exist.
 */
function ensureKiDirectory() {
  if (fs.existsSync(KI_DIR)) {
    return;
  }
  fs.mkdirSync(KI_DIR, { recursive: true });
}
13
+
14
/**
 * Guarantee the commands file exists, seeding it with an empty JSON object
 * (and creating the parent directory) on first use.
 */
function ensureCommandsFile() {
  ensureKiDirectory();
  if (fs.existsSync(COMMANDS_FILE)) {
    return;
  }
  fs.writeFileSync(COMMANDS_FILE, JSON.stringify({}, null, 2));
}
20
+
21
/**
 * Load the saved command map from disk.
 * @returns {object} key -> command map; `{}` if the file is unreadable.
 */
function loadCommands() {
  ensureCommandsFile();
  try {
    const data = fs.readFileSync(COMMANDS_FILE, 'utf8');
    return JSON.parse(data);
  } catch (error) {
    // Keep the best-effort empty-map fallback, but warn: a silently
    // swallowed parse error means the next save would wipe the vault
    // without the user ever learning the file was corrupt.
    console.error(`Warning: could not read ${COMMANDS_FILE}: ${error.message}`);
    return {};
  }
}
30
+
31
/**
 * Persist the full command map to disk.
 * Writes to a sibling temp file and renames it into place so a crash
 * mid-write cannot leave commands.json truncated (rename within a
 * directory is atomic on POSIX filesystems).
 * @param {object} commands - Complete key -> command map to store.
 */
function saveCommands(commands) {
  ensureCommandsFile();
  const tmpFile = `${COMMANDS_FILE}.tmp`;
  fs.writeFileSync(tmpFile, JSON.stringify(commands, null, 2));
  fs.renameSync(tmpFile, COMMANDS_FILE);
}
35
+
36
/**
 * Look up a single saved command.
 * @param {string} key - Command name.
 * @returns {string|undefined} the stored command, or undefined if absent.
 */
function getCommand(key) {
  return loadCommands()[key];
}
40
+
41
/**
 * Save (or overwrite) a command under the given key.
 * @param {string} key - Command name.
 * @param {string} command - Command line to store.
 */
function setCommand(key, command) {
  const commands = loadCommands();
  saveCommands({ ...commands, [key]: command });
}
46
+
47
/**
 * Remove a saved command.
 * @param {string} key - Command name.
 * @returns {boolean} true if the key existed and was removed.
 */
function deleteCommand(key) {
  const commands = loadCommands();
  // Own-property check (not truthiness): the old `if (commands[key])`
  // made an entry whose stored value is '' (or otherwise falsy)
  // impossible to delete, and inherited Object.prototype names must
  // never count as stored commands.
  if (!Object.prototype.hasOwnProperty.call(commands, key)) {
    return false;
  }
  delete commands[key];
  saveCommands(commands);
  return true;
}
56
+
57
/**
 * Fetch the entire key -> command map.
 * @returns {object} all saved commands.
 */
function getAllCommands() {
  const commands = loadCommands();
  return commands;
}
60
+
61
/**
 * Check whether a command is saved under the given key.
 * @param {string} key - Command name.
 * @returns {boolean} true only for keys actually stored in the vault.
 */
function commandExists(key) {
  // `key in commands` also matches inherited Object.prototype members
  // ('toString', 'constructor', ...), falsely reporting them as saved
  // commands; restrict the check to own properties.
  return Object.prototype.hasOwnProperty.call(loadCommands(), key);
}
65
+
66
// CRUD helpers for the persisted command vault; the on-disk path is also
// exposed.
module.exports = {
  getCommand,
  setCommand,
  deleteCommand,
  getAllCommands,
  commandExists,
  COMMANDS_FILE
};
package/package.json ADDED
@@ -0,0 +1,36 @@
1
+ {
2
+ "name": "cmdvault",
3
+ "version": "1.0.0",
4
+ "description": "CmdVault - A smart terminal assistant that remembers and executes shell commands",
5
+ "main": "index.js",
6
+ "bin": {
7
+ "cm": "bin/cm.js"
8
+ },
9
+ "scripts": {
10
+ "test": "echo \"Error: no test specified\" && exit 1"
11
+ },
12
+ "keywords": [
13
+ "cli",
14
+ "terminal",
15
+ "command",
16
+ "assistant",
17
+ "shell",
18
+ "productivity"
19
+ ],
20
+ "author": "Kiran Vaddi <kiranvaddi3008@gmail.com>",
21
+ "repository": {
22
+ "type": "git",
23
+ "url": "git+https://github.com/vaddisiva5034/cmdvault.git"
24
+ },
25
+ "license": "MIT",
26
+ "dependencies": {
27
+ "@google/generative-ai": "^0.24.1",
28
+ "chalk": "^4.1.2",
29
+ "cli-table3": "^0.6.3",
30
+ "commander": "^11.1.0",
31
+ "openai": "^4.20.1"
32
+ },
33
+ "engines": {
34
+ "node": ">=14.0.0"
35
+ }
36
+ }