@nclamvn/vibecode-cli 2.2.0 → 3.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/vibecode.js +101 -2
- package/package.json +3 -1
- package/src/commands/config.js +42 -4
- package/src/commands/deploy.js +728 -0
- package/src/commands/favorite.js +412 -0
- package/src/commands/feedback.js +473 -0
- package/src/commands/go.js +170 -4
- package/src/commands/history.js +249 -0
- package/src/commands/images.js +465 -0
- package/src/commands/preview.js +554 -0
- package/src/commands/voice.js +580 -0
- package/src/commands/watch.js +3 -20
- package/src/index.js +49 -2
- package/src/services/image-service.js +513 -0
- package/src/utils/history.js +357 -0
- package/src/utils/notifications.js +343 -0
|
@@ -0,0 +1,580 @@
|
|
|
1
|
+
// ═══════════════════════════════════════════════════════════════════════════════
|
|
2
|
+
// VIBECODE CLI - Voice Command
|
|
3
|
+
// Voice-controlled commands - hands-free coding
|
|
4
|
+
// ═══════════════════════════════════════════════════════════════════════════════
|
|
5
|
+
|
|
6
|
+
import { spawn, exec } from 'child_process';
|
|
7
|
+
import { promisify } from 'util';
|
|
8
|
+
import fs from 'fs/promises';
|
|
9
|
+
import path from 'path';
|
|
10
|
+
import chalk from 'chalk';
|
|
11
|
+
import inquirer from 'inquirer';
|
|
12
|
+
import readline from 'readline';
|
|
13
|
+
|
|
14
|
+
const execAsync = promisify(exec);
|
|
15
|
+
|
|
16
|
+
// Voice recognition methods
// NOTE(review): this table is not referenced anywhere else in this module and
// is not exported — it reads like metadata intended for a help/doctor screen.
// Confirm intent before removing; selectVoiceMethod() re-derives availability
// itself instead of consulting this table.
const VOICE_METHODS = {
  // Only offered on darwin (same platform check repeated in selectVoiceMethod).
  macos: {
    name: 'macOS Dictation',
    available: process.platform === 'darwin',
    description: 'Built-in macOS speech recognition'
  },
  // Cloud transcription; actual usability depends on OPENAI_API_KEY at runtime.
  whisper: {
    name: 'OpenAI Whisper',
    available: true,
    description: 'Cloud-based speech recognition (requires API key)'
  },
  // Always-available typed fallback.
  text: {
    name: 'Text Input',
    available: true,
    description: 'Type commands as fallback'
  }
};
|
|
34
|
+
|
|
35
|
+
/**
 * Entry point for `vibecode voice`.
 *
 * Prints the banner, resolves an input method (explicit flag, platform
 * availability, or an interactive picker), then either performs one
 * capture-and-run cycle for a wrapped subcommand or drops into the
 * interactive voice REPL.
 *
 * @param {string|undefined} subcommand - optional vibecode subcommand to wrap
 * @param {object} options - CLI flags (whisper/macos/text, auto, timeout, ...)
 */
export async function voiceCommand(subcommand, options = {}) {
  const banner = `
╭────────────────────────────────────────────────────────────────────╮
│          🎤 VIBECODE VOICE MODE                                    │
│                                                                    │
│          Speak your commands - hands-free coding!                  │
│                                                                    │
╰────────────────────────────────────────────────────────────────────╯
`;
  console.log(chalk.cyan(banner));

  // No usable input method selected — nothing more to do.
  const method = await selectVoiceMethod(options);
  if (!method) {
    return;
  }

  // A subcommand means a single one-shot capture; otherwise start the REPL.
  return subcommand
    ? oneShotVoice(subcommand, method, options)
    : interactiveVoice(method, options);
}
|
|
60
|
+
|
|
61
|
+
/**
 * Decide which input method to use for capturing commands.
 *
 * Explicit CLI flags win outright; otherwise an availability-aware choice
 * list is built (macOS dictation only on darwin, Whisper always listed but
 * labelled by whether OPENAI_API_KEY is set, typed input always available)
 * and the user picks interactively unless only one option exists.
 *
 * @param {object} options - may carry boolean flags whisper/macos/text
 * @returns {Promise<string>} one of 'macos' | 'whisper' | 'text'
 */
async function selectVoiceMethod(options) {
  // Explicit flags short-circuit the picker.
  if (options.whisper) return 'whisper';
  if (options.macos) return 'macos';
  if (options.text) return 'text';

  const choices = [];

  // Dictation entry only makes sense on macOS.
  if (process.platform === 'darwin') {
    choices.push({
      name: `🍎 macOS Dictation ${chalk.gray('(Built-in, Free)')}`,
      value: 'macos'
    });
  }

  // Whisper is always offered; the label reflects whether the key is set.
  const whisperLabel = process.env.OPENAI_API_KEY
    ? `🤖 OpenAI Whisper ${chalk.gray('(Cloud, API Key set)')}`
    : `🤖 OpenAI Whisper ${chalk.yellow('(Requires OPENAI_API_KEY)')}`;
  choices.push({ name: whisperLabel, value: 'whisper' });

  // Typed fallback is unconditional.
  choices.push({
    name: `⌨️ Text Input ${chalk.gray('(Type commands)')}`,
    value: 'text'
  });

  // With a single candidate there is nothing to ask.
  if (choices.length === 1) {
    return choices[0].value;
  }

  const { method } = await inquirer.prompt([{
    type: 'list',
    name: 'method',
    message: 'Select input method:',
    choices
  }]);

  return method;
}
|
|
111
|
+
|
|
112
|
+
/**
 * One-shot voice command (e.g., `vibecode voice go`).
 *
 * Captures a single round of input with the chosen method, wraps it as
 * `vibecode <subcommand> "<input>"`, and runs it — immediately when
 * `options.auto` is set, otherwise after a confirmation prompt.
 *
 * @param {string} subcommand - the vibecode subcommand to feed (e.g. 'go')
 * @param {string} method - input method returned by selectVoiceMethod()
 * @param {object} options - { auto?: boolean, timeout?: number|string, ... }
 */
async function oneShotVoice(subcommand, method, options) {
  console.log(chalk.yellow(`\n  🎤 Listening for "${subcommand}" command...\n`));

  if (method === 'text') {
    console.log(chalk.gray('  Type your description:\n'));
  } else {
    console.log(chalk.gray('  Speak now, or type as fallback. Press Enter when done.\n'));
  }

  const transcript = await listen(method, options);

  if (!transcript) {
    console.log(chalk.red('  ❌ No input detected.\n'));
    return;
  }

  console.log(chalk.green(`\n  📝 Input: "${transcript}"\n`));

  // FIX: the transcript used to be interpolated verbatim into a
  // double-quoted shell string; escape `"`, `\`, backtick and `$` so spoken
  // or typed quotes cannot break the quoting or inject shell syntax
  // (the command is ultimately run through a shell by executeCommand).
  const safeTranscript = transcript.replace(/([\\"`$])/g, '\\$1');

  // Build command
  const fullCommand = `vibecode ${subcommand} "${safeTranscript}"`;

  console.log(chalk.cyan(`  🚀 Command: ${fullCommand}\n`));

  if (options.auto) {
    // Auto-execute without confirmation.
    await executeCommand(fullCommand);
  } else {
    // Confirm first.
    const { execute } = await inquirer.prompt([{
      type: 'confirm',
      name: 'execute',
      message: 'Execute this command?',
      default: true
    }]);

    if (execute) {
      await executeCommand(fullCommand);
    }
  }
}
|
|
155
|
+
|
|
156
|
+
/**
 * Interactive voice mode: a readline-based REPL.
 *
 * Each submitted line is either used directly (typed input) or, when empty,
 * triggers a capture round via listen(); the result is parsed with
 * parseVoiceCommand() and executed (optionally after confirmation).
 *
 * NOTE(review): the 'line' handler is async — readline keeps emitting lines
 * while a previous handler is still awaiting (e.g. inside the inquirer
 * confirm), so rapid input can interleave; confirm this is acceptable.
 * NOTE(review): a SIGINT listener is added to `process` on every invocation
 * and never removed; harmless for a once-per-run command, but would leak if
 * this function were ever called repeatedly.
 *
 * @param {string} method - input method from selectVoiceMethod()
 * @param {object} options - { auto?: boolean, timeout?: number, ... }
 */
async function interactiveVoice(method, options) {
  // Cheat-sheet shown once at startup.
  console.log(chalk.white('  Voice Commands:\n'));
  console.log(chalk.gray('    • "go <description>"       Create a new project'));
  console.log(chalk.gray('    • "agent <description>"    Multi-module build'));
  console.log(chalk.gray('    • "debug"                  Debug current project'));
  console.log(chalk.gray('    • "preview"                Start preview server'));
  console.log(chalk.gray('    • "deploy"                 Deploy project'));
  console.log(chalk.gray('    • "feedback"               Enter feedback mode'));
  console.log(chalk.gray('    • "help"                   Show all commands'));
  console.log(chalk.gray('    • "exit" or "quit"         Stop voice mode\n'));

  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
    prompt: chalk.magenta('🎤 voice> ')
  });

  console.log(chalk.yellow('  Press Enter to start listening, or type directly...\n'));
  rl.prompt();

  rl.on('line', async (line) => {
    const directInput = line.trim();

    let transcript;

    if (directInput) {
      // User typed something directly — skip the capture round.
      transcript = directInput;
    } else {
      // Empty line = start a listening round with the selected method.
      console.log(chalk.yellow('\n  🎤 Listening... (speak or type)\n'));
      transcript = await listen(method, { timeout: options.timeout || 10 });
    }

    if (!transcript) {
      console.log(chalk.gray('  No input detected. Try again.\n'));
      rl.prompt();
      return;
    }

    console.log(chalk.green(`  📝 Input: "${transcript}"\n`));

    // Parse the utterance into a runnable vibecode command.
    const command = parseVoiceCommand(transcript);

    if (command.type === 'exit') {
      // Closing rl triggers the 'close' handler below, which exits.
      console.log(chalk.cyan('\n  👋 Voice mode ended.\n'));
      rl.close();
      return;
    }

    if (command.type === 'help') {
      showVoiceHelp();
      rl.prompt();
      return;
    }

    if (command.fullCommand) {
      console.log(chalk.cyan(`  🚀 Executing: ${command.fullCommand}\n`));

      if (options.auto) {
        // --auto skips the confirmation step.
        await executeCommand(command.fullCommand);
      } else {
        const { execute } = await inquirer.prompt([{
          type: 'confirm',
          name: 'execute',
          message: 'Execute?',
          default: true
        }]);

        if (execute) {
          await executeCommand(command.fullCommand);
        }
      }
    } else {
      // Parser returned { type: 'unknown' } — no command could be built.
      console.log(chalk.yellow(`  ⚠️ Couldn't understand: "${transcript}"`));
      console.log(chalk.gray('  Try: "go <description>", "debug", "preview", etc.\n'));
    }

    console.log('');
    rl.prompt();
  });

  // Exiting the process here ends the whole CLI run, not just this REPL.
  rl.on('close', () => {
    process.exit(0);
  });

  // Handle Ctrl+C gracefully: print a farewell, then close rl (which exits).
  process.on('SIGINT', () => {
    console.log(chalk.cyan('\n\n  👋 Voice mode ended.\n'));
    rl.close();
  });
}
|
|
252
|
+
|
|
253
|
+
/**
 * Parse a spoken/typed transcript into an executable vibecode command.
 *
 * Recognition order: exit words, "help", project-creation verbs
 * ("go/create/build/make/new/generate <description>"), "agent <description>",
 * "template <id>", single keywords (debug, deploy, ...), and finally any
 * sufficiently long free-form sentence is treated as a "go" description.
 *
 * @param {string} transcript - raw user input
 * @returns {{type: string, fullCommand?: string}} `type` is 'exit' | 'help' |
 *   'unknown' or a command name; `fullCommand` is the shell line to run when
 *   one could be built.
 */
function parseVoiceCommand(transcript) {
  const lower = transcript.toLowerCase().trim();

  // FIX: descriptions are interpolated into a double-quoted sh argument
  // (executeCommand runs the line through a shell), so escape `"`, `\`,
  // backtick and `$` to prevent quoting breakage / shell injection.
  const quoteArg = (s) => `"${s.replace(/([\\"`$])/g, '\\$1')}"`;

  // Exit commands
  if (['exit', 'quit', 'stop', 'bye', 'goodbye', 'done'].some(w => lower === w || lower === `say ${w}`)) {
    return { type: 'exit' };
  }

  // Help
  if (lower === 'help' || lower.includes('what can you do')) {
    return { type: 'help' };
  }

  // Go command - various phrasings
  const goPatterns = [
    /^go\s+(.+)$/i,
    /^create\s+(.+)$/i,
    /^build\s+(.+)$/i,
    /^make\s+(.+)$/i,
    /^new\s+(.+)$/i,
    /^generate\s+(.+)$/i
  ];

  for (const pattern of goPatterns) {
    const match = transcript.match(pattern);
    if (match) {
      return { type: 'go', fullCommand: `vibecode go ${quoteArg(match[1].trim())}` };
    }
  }

  // Agent command
  const agentMatch = transcript.match(/^agent\s+(.+)$/i);
  if (agentMatch) {
    return { type: 'agent', fullCommand: `vibecode agent ${quoteArg(agentMatch[1].trim())} --new` };
  }

  // FIX: "template <id>" must be checked BEFORE the keyword table below —
  // the bare `includes('template')` entry used to shadow this branch, making
  // named templates unreachable. The id also allows hyphens now
  // (e.g. "template landing-saas", as in the help examples), which the old
  // `\w+` pattern silently truncated to "landing".
  const templateMatch = transcript.match(/template\s+([\w-]+)/i);
  if (templateMatch) {
    return { type: 'template', fullCommand: `vibecode go --template ${templateMatch[1].toLowerCase()}` };
  }

  // Single-keyword commands. Substring matching is deliberate so phrasings
  // like "run debug" still work.
  const simpleCommands = {
    debug: 'vibecode debug --auto',
    status: 'vibecode status',
    preview: 'vibecode preview',
    deploy: 'vibecode deploy',
    templates: 'vibecode templates',
    template: 'vibecode templates',
    feedback: 'vibecode feedback --preview',
    images: 'vibecode images',
    doctor: 'vibecode doctor',
    undo: 'vibecode undo',
    learn: 'vibecode learn --stats'
  };

  for (const [keyword, command] of Object.entries(simpleCommands)) {
    if (lower === keyword || lower.includes(keyword)) {
      return { type: keyword, fullCommand: command };
    }
  }

  // A long free-form sentence is most likely a project description.
  if (lower.length > 15 && !lower.includes('vibecode')) {
    return { type: 'go', fullCommand: `vibecode go ${quoteArg(transcript)}` };
  }

  return { type: 'unknown' };
}
|
|
329
|
+
|
|
330
|
+
/**
 * Capture one round of user input with the chosen method.
 *
 * Unknown methods fall back to typed input rather than failing.
 *
 * @param {string} method - 'text' | 'macos' | 'whisper'
 * @param {object} options - may carry `timeout` (seconds, string or number)
 * @returns {Promise<string|null>} the transcript, or null when nothing was captured
 */
async function listen(method, options = {}) {
  // FIX: parseInt was called without a radix; pass base 10 explicitly so
  // values like "08" parse predictably. Absent/NaN/0 fall back to 10 seconds.
  const timeout = Number.parseInt(options.timeout, 10) || 10;

  switch (method) {
    case 'macos':
      return listenMacOS(timeout);
    case 'whisper':
      return listenWhisper(timeout, options);
    case 'text':
    default:
      // Typed input is both the explicit 'text' method and the fallback.
      return listenText(timeout);
  }
}
|
|
346
|
+
|
|
347
|
+
/**
 * Prompt for a single line of typed input (the fallback input method).
 *
 * Resolves the trimmed line, or null when the line is empty or when the
 * timeout (in seconds) elapses before anything is submitted.
 *
 * @param {number} timeout - seconds to wait before giving up
 * @returns {Promise<string|null>}
 */
async function listenText(timeout) {
  return new Promise((resolve) => {
    const reader = readline.createInterface({
      input: process.stdin,
      output: process.stdout
    });

    // Single settle point: whichever of answer/deadline fires first wins,
    // the other becomes a no-op.
    let settled = false;
    const finish = (value) => {
      if (settled) return;
      settled = true;
      clearTimeout(deadline);
      reader.close();
      resolve(value);
    };

    const deadline = setTimeout(() => finish(null), timeout * 1000);

    reader.question(chalk.gray('  > '), (answer) => {
      finish(answer.trim() || null);
    });
  });
}
|
|
377
|
+
|
|
378
|
+
/**
 * "macOS" capture path.
 *
 * NOTE(review): despite the name, this does not use macOS's on-device
 * dictation — it records from the default microphone with sox and then
 * transcribes via the OpenAI Whisper API (when OPENAI_API_KEY is set).
 * Every failure path degrades to typed input via listenText().
 *
 * @param {number} timeout - recording duration in seconds
 * @returns {Promise<string|null>} transcript, or null when nothing usable
 */
async function listenMacOS(timeout) {
  // Check if sox is available for recording
  try {
    await execAsync('which sox');
  } catch {
    console.log(chalk.yellow('  ⚠️ sox not installed. Using text input.'));
    console.log(chalk.gray('  Install with: brew install sox\n'));
    return listenText(timeout);
  }

  console.log(chalk.gray(`  [Recording for ${timeout} seconds...]\n`));

  // Unique temp file per capture; cleaned up on every path below.
  const audioFile = `/tmp/vibecode-voice-${Date.now()}.wav`;

  try {
    // Record audio with sox: 16kHz mono 16-bit for `timeout` seconds.
    // The exec timeout adds 5s of slack over the recording duration.
    await execAsync(`sox -d -r 16000 -c 1 -b 16 "${audioFile}" trim 0 ${timeout} 2>/dev/null`, {
      timeout: (timeout + 5) * 1000
    });

    // Check if we have Whisper API key for transcription
    if (process.env.OPENAI_API_KEY) {
      const transcript = await transcribeWithWhisper(audioFile, process.env.OPENAI_API_KEY);
      // Best-effort cleanup; ignore unlink errors.
      await fs.unlink(audioFile).catch(() => {});
      return transcript;
    } else {
      console.log(chalk.yellow('  ⚠️ No OPENAI_API_KEY for transcription.'));
      await fs.unlink(audioFile).catch(() => {});
      return listenText(timeout);
    }
  } catch (error) {
    // Recording (or transcription) threw — fall back to typed input.
    console.log(chalk.yellow('  ⚠️ Recording failed. Using text input.'));
    await fs.unlink(audioFile).catch(() => {});
    return listenText(timeout);
  }
}
|
|
417
|
+
|
|
418
|
+
/**
 * Whisper capture path: record with sox, transcribe with the OpenAI
 * Whisper API.
 *
 * NOTE(review): nearly identical to listenMacOS() — the record/cleanup
 * logic is duplicated; consider extracting a shared recorder helper.
 * Every failure path (missing key, missing sox, recording error) degrades
 * to typed input via listenText().
 *
 * @param {number} timeout - recording duration in seconds
 * @param {object} options - currently unused beyond the signature
 * @returns {Promise<string|null>} transcript, or null when nothing usable
 */
async function listenWhisper(timeout, options) {
  const apiKey = process.env.OPENAI_API_KEY;

  if (!apiKey) {
    console.log(chalk.yellow('  ⚠️ OPENAI_API_KEY not set.'));
    console.log(chalk.gray('  Set it with: export OPENAI_API_KEY=your-key\n'));
    return listenText(timeout);
  }

  // Check for sox
  try {
    await execAsync('which sox');
  } catch {
    console.log(chalk.yellow('  ⚠️ sox not installed for audio recording.'));
    console.log(chalk.gray('  Install with: brew install sox\n'));
    return listenText(timeout);
  }

  // Unique temp file per capture; cleaned up on every path below.
  const audioFile = `/tmp/vibecode-voice-${Date.now()}.wav`;

  console.log(chalk.gray(`  [Recording for ${timeout} seconds...]\n`));

  try {
    // Record audio: 16kHz mono 16-bit for `timeout` seconds; the exec
    // timeout adds 5s of slack over the recording duration.
    await execAsync(`sox -d -r 16000 -c 1 -b 16 "${audioFile}" trim 0 ${timeout} 2>/dev/null`, {
      timeout: (timeout + 5) * 1000
    });

    // Transcribe with Whisper
    const transcript = await transcribeWithWhisper(audioFile, apiKey);

    // Cleanup (best-effort; ignore unlink errors)
    await fs.unlink(audioFile).catch(() => {});

    return transcript;

  } catch (error) {
    console.log(chalk.yellow(`  ⚠️ Recording/transcription failed: ${error.message}`));
    await fs.unlink(audioFile).catch(() => {});
    return listenText(timeout);
  }
}
|
|
463
|
+
|
|
464
|
+
/**
 * Transcribe an audio file with the OpenAI Whisper API.
 *
 * Builds the multipart/form-data request body by hand (to avoid a form-data
 * dependency) and POSTs it to /v1/audio/transcriptions. Returns the
 * transcribed text, or null on any failure (errors are logged, not thrown).
 *
 * @param {string} audioFile - path to a WAV file on disk
 * @param {string} apiKey - OpenAI API key for the Authorization header
 * @returns {Promise<string|null>}
 */
async function transcribeWithWhisper(audioFile, apiKey) {
  try {
    // Read audio file
    const audioBuffer = await fs.readFile(audioFile);

    // Create form data manually (avoid extra dependencies).
    // Date.now() makes the boundary unlikely to collide with file content.
    const boundary = '----VibecodeFormBoundary' + Date.now();
    const fileName = path.basename(audioFile);

    // Part 1 headers for the "file" field; the trailing '', '' pair yields
    // the required blank line (\r\n\r\n) before the raw audio bytes.
    const header = [
      `--${boundary}`,
      `Content-Disposition: form-data; name="file"; filename="${fileName}"`,
      'Content-Type: audio/wav',
      '',
      ''
    ].join('\r\n');

    // Part 2 ("model" field) plus the closing boundary; the leading ''
    // terminates the audio bytes with \r\n before the next boundary.
    const modelPart = [
      '',
      `--${boundary}`,
      'Content-Disposition: form-data; name="model"',
      '',
      'whisper-1',
      `--${boundary}--`,
      ''
    ].join('\r\n');

    const headerBuffer = Buffer.from(header, 'utf-8');
    const modelBuffer = Buffer.from(modelPart, 'utf-8');
    const body = Buffer.concat([headerBuffer, audioBuffer, modelBuffer]);

    // Make request — Content-Type must carry the exact boundary used above.
    const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${apiKey}`,
        'Content-Type': `multipart/form-data; boundary=${boundary}`
      },
      body
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`Whisper API error: ${response.status} - ${errorText}`);
    }

    const result = await response.json();
    // API returns { text: "..." }; normalize empty/missing text to null.
    return result.text || null;

  } catch (error) {
    // Deliberately swallow and report: callers treat null as "no transcript".
    console.log(chalk.yellow(`  ⚠️ Whisper transcription failed: ${error.message}`));
    return null;
  }
}
|
|
521
|
+
|
|
522
|
+
/**
 * Run a shell command line, streaming its output to the current terminal.
 *
 * Resolves with the child's exit code (or 1 when the process could not be
 * spawned at all); never rejects.
 *
 * @param {string} command - full shell command line to execute
 * @returns {Promise<number|null>} exit code (null if killed by a signal)
 */
async function executeCommand(command) {
  return new Promise((resolve) => {
    // FIX: the original used spawn('sh', ['-c', command], { shell: true }),
    // which wraps the command in TWO shells (with shell:true Node itself
    // runs the args through a shell again, mangling quoting) and hard-codes
    // `sh`, which does not exist on Windows. `shell: true` alone runs the
    // line through the platform's default shell.
    const child = spawn(command, {
      stdio: 'inherit',
      shell: true
    });

    child.on('close', (code) => {
      resolve(code);
    });

    // Spawn failure (e.g. shell missing): report and resolve non-zero.
    child.on('error', (error) => {
      console.log(chalk.red(`  ❌ Execution failed: ${error.message}`));
      resolve(1);
    });
  });
}
|
|
542
|
+
|
|
543
|
+
/**
 * Print the voice-mode cheat sheet.
 *
 * Mirrors the command forms recognized by parseVoiceCommand(); keep the two
 * in sync when adding commands. Output only — no return value.
 */
function showVoiceHelp() {
  console.log(chalk.cyan(`
  🎤 Voice Commands:
  ─────────────────────────────────────────────────────────────────

  ${chalk.white('Project Creation:')}
    "go <description>"         Create new project
    "create <description>"     Same as go
    "agent <description>"      Multi-module build
    "template <id>"            Use a template

  ${chalk.white('Project Management:')}
    "debug"                    Debug current project
    "status"                   Show project status
    "preview"                  Start preview server
    "deploy"                   Deploy project
    "feedback"                 Interactive feedback mode
    "images"                   Generate images
    "undo"                     Undo last change

  ${chalk.white('Navigation:')}
    "templates"                Browse templates
    "doctor"                   Check configuration

  ${chalk.white('Session:')}
    "help"                     Show this help
    "exit" / "quit"            End voice mode

  ${chalk.gray('Examples:')}
    ${chalk.gray('"go landing page for my SaaS startup"')}
    ${chalk.gray('"create e-commerce site with dark theme"')}
    ${chalk.gray('"template landing-saas"')}
    ${chalk.gray('"deploy"')}
`));
}
|
package/src/commands/watch.js
CHANGED
|
@@ -10,6 +10,7 @@ import { promisify } from 'util';
|
|
|
10
10
|
import path from 'path';
|
|
11
11
|
import fs from 'fs/promises';
|
|
12
12
|
import readline from 'readline';
|
|
13
|
+
import { notify as sendNotification, notifyError, notifySuccess } from '../utils/notifications.js';
|
|
13
14
|
|
|
14
15
|
const execAsync = promisify(exec);
|
|
15
16
|
|
|
@@ -123,9 +124,9 @@ export async function watchCommand(options) {
|
|
|
123
124
|
|
|
124
125
|
// Notify if enabled
|
|
125
126
|
if (options.notify && state.errors.length > 0) {
|
|
126
|
-
|
|
127
|
+
notifyError(`${state.errors.length} errors found`, 'Watch Mode');
|
|
127
128
|
} else if (options.notify && state.errors.length === 0) {
|
|
128
|
-
|
|
129
|
+
notifySuccess('All checks passed!', 'Watch Mode');
|
|
129
130
|
}
|
|
130
131
|
|
|
131
132
|
state.running = false;
|
|
@@ -535,22 +536,4 @@ function setupKeyboardShortcuts(state, checks, cwd, watchDirs, watcher, runCheck
|
|
|
535
536
|
});
|
|
536
537
|
}
|
|
537
538
|
|
|
538
|
-
/**
|
|
539
|
-
* Send desktop notification
|
|
540
|
-
*/
|
|
541
|
-
function notify(title, message) {
|
|
542
|
-
const platform = process.platform;
|
|
543
|
-
|
|
544
|
-
try {
|
|
545
|
-
if (platform === 'darwin') {
|
|
546
|
-
exec(`osascript -e 'display notification "${message}" with title "${title}"'`);
|
|
547
|
-
} else if (platform === 'linux') {
|
|
548
|
-
exec(`notify-send "${title}" "${message}"`);
|
|
549
|
-
}
|
|
550
|
-
// Windows would need different approach (powershell or node-notifier)
|
|
551
|
-
} catch {
|
|
552
|
-
// Silently fail - notifications are optional
|
|
553
|
-
}
|
|
554
|
-
}
|
|
555
|
-
|
|
556
539
|
export default watchCommand;
|
package/src/index.js
CHANGED
|
@@ -17,7 +17,7 @@ export { reviewCommand } from './commands/review.js';
|
|
|
17
17
|
export { snapshotCommand } from './commands/snapshot.js';
|
|
18
18
|
|
|
19
19
|
// Phase C Commands
|
|
20
|
-
export { configCommand } from './commands/config.js';
|
|
20
|
+
export { configCommand, getNotificationsSetting } from './commands/config.js';
|
|
21
21
|
|
|
22
22
|
// Phase E Commands - Magic Mode
|
|
23
23
|
export { goCommand } from './commands/go.js';
|
|
@@ -58,7 +58,7 @@ export { securityCommand } from './commands/security.js';
|
|
|
58
58
|
export { askCommand } from './commands/ask.js';
|
|
59
59
|
export { migrateCommand } from './commands/migrate.js';
|
|
60
60
|
|
|
61
|
-
// Phase M Commands - Templates
|
|
61
|
+
// Phase M Commands - Templates & Preview
|
|
62
62
|
export { templatesCommand } from './commands/templates.js';
|
|
63
63
|
export {
|
|
64
64
|
TEMPLATES,
|
|
@@ -71,6 +71,30 @@ export {
|
|
|
71
71
|
isValidTemplate
|
|
72
72
|
} from './templates/index.js';
|
|
73
73
|
|
|
74
|
+
export { previewCommand, autoPreview } from './commands/preview.js';
|
|
75
|
+
export { imagesCommand, autoGenerateImages } from './commands/images.js';
|
|
76
|
+
export { deployCommand, autoDeploy } from './commands/deploy.js';
|
|
77
|
+
export { feedbackCommand, startFeedbackMode } from './commands/feedback.js';
|
|
78
|
+
export { voiceCommand } from './commands/voice.js';
|
|
79
|
+
|
|
80
|
+
// Phase M8 Commands - History & Favorites
|
|
81
|
+
export { historyCommand } from './commands/history.js';
|
|
82
|
+
export { favoriteCommand } from './commands/favorite.js';
|
|
83
|
+
export {
|
|
84
|
+
addToHistory,
|
|
85
|
+
loadHistory,
|
|
86
|
+
clearHistory,
|
|
87
|
+
searchHistory,
|
|
88
|
+
getHistoryItem,
|
|
89
|
+
loadFavorites,
|
|
90
|
+
addFavorite,
|
|
91
|
+
removeFavorite,
|
|
92
|
+
getFavorite,
|
|
93
|
+
searchFavorites,
|
|
94
|
+
exportFavorites,
|
|
95
|
+
importFavorites
|
|
96
|
+
} from './utils/history.js';
|
|
97
|
+
|
|
74
98
|
// UI exports (Phase H2: Dashboard)
|
|
75
99
|
export {
|
|
76
100
|
ProgressDashboard,
|
|
@@ -126,3 +150,26 @@ export { VERSION, SPEC_HASH, STATES } from './config/constants.js';
|
|
|
126
150
|
|
|
127
151
|
// Providers
|
|
128
152
|
export { PROVIDERS, getProvider, getDefaultProvider } from './providers/index.js';
|
|
153
|
+
|
|
154
|
+
// Notifications (Phase M7)
|
|
155
|
+
export {
|
|
156
|
+
notify,
|
|
157
|
+
notifyBuildComplete,
|
|
158
|
+
notifyDeployComplete,
|
|
159
|
+
notifyWatchChange,
|
|
160
|
+
notifyTestComplete,
|
|
161
|
+
notifyError,
|
|
162
|
+
notifySuccess,
|
|
163
|
+
notifyAgentProgress,
|
|
164
|
+
isNotificationSupported
|
|
165
|
+
} from './utils/notifications.js';
|
|
166
|
+
|
|
167
|
+
// Image Service exports (Phase M3)
|
|
168
|
+
export {
|
|
169
|
+
ImageService,
|
|
170
|
+
createImageService,
|
|
171
|
+
searchImages,
|
|
172
|
+
generateImages,
|
|
173
|
+
getCuratedCollection,
|
|
174
|
+
CURATED_COLLECTIONS
|
|
175
|
+
} from './services/image-service.js';
|