@vibeframe/cli 0.27.0 → 0.29.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/LICENSE +21 -0
  2. package/dist/agent/adapters/index.d.ts +1 -0
  3. package/dist/agent/adapters/index.d.ts.map +1 -1
  4. package/dist/agent/adapters/index.js +5 -0
  5. package/dist/agent/adapters/index.js.map +1 -1
  6. package/dist/agent/adapters/openrouter.d.ts +16 -0
  7. package/dist/agent/adapters/openrouter.d.ts.map +1 -0
  8. package/dist/agent/adapters/openrouter.js +100 -0
  9. package/dist/agent/adapters/openrouter.js.map +1 -0
  10. package/dist/agent/types.d.ts +1 -1
  11. package/dist/agent/types.d.ts.map +1 -1
  12. package/dist/commands/agent.d.ts.map +1 -1
  13. package/dist/commands/agent.js +3 -1
  14. package/dist/commands/agent.js.map +1 -1
  15. package/dist/commands/setup.js +5 -2
  16. package/dist/commands/setup.js.map +1 -1
  17. package/dist/config/schema.d.ts +2 -1
  18. package/dist/config/schema.d.ts.map +1 -1
  19. package/dist/config/schema.js +2 -0
  20. package/dist/config/schema.js.map +1 -1
  21. package/dist/index.js +0 -0
  22. package/package.json +16 -12
  23. package/.turbo/turbo-build.log +0 -4
  24. package/.turbo/turbo-lint.log +0 -21
  25. package/.turbo/turbo-test.log +0 -689
  26. package/src/agent/adapters/claude.ts +0 -143
  27. package/src/agent/adapters/gemini.ts +0 -159
  28. package/src/agent/adapters/index.ts +0 -61
  29. package/src/agent/adapters/ollama.ts +0 -231
  30. package/src/agent/adapters/openai.ts +0 -116
  31. package/src/agent/adapters/xai.ts +0 -119
  32. package/src/agent/index.ts +0 -251
  33. package/src/agent/memory/index.ts +0 -151
  34. package/src/agent/prompts/system.ts +0 -106
  35. package/src/agent/tools/ai-editing.ts +0 -845
  36. package/src/agent/tools/ai-generation.ts +0 -1073
  37. package/src/agent/tools/ai-pipeline.ts +0 -1055
  38. package/src/agent/tools/ai.ts +0 -21
  39. package/src/agent/tools/batch.ts +0 -429
  40. package/src/agent/tools/e2e.test.ts +0 -545
  41. package/src/agent/tools/export.ts +0 -184
  42. package/src/agent/tools/filesystem.ts +0 -237
  43. package/src/agent/tools/index.ts +0 -150
  44. package/src/agent/tools/integration.test.ts +0 -775
  45. package/src/agent/tools/media.ts +0 -697
  46. package/src/agent/tools/project.ts +0 -313
  47. package/src/agent/tools/timeline.ts +0 -951
  48. package/src/agent/types.ts +0 -68
  49. package/src/commands/agent.ts +0 -340
  50. package/src/commands/ai-analyze.ts +0 -429
  51. package/src/commands/ai-animated-caption.ts +0 -390
  52. package/src/commands/ai-audio.ts +0 -941
  53. package/src/commands/ai-broll.ts +0 -490
  54. package/src/commands/ai-edit-cli.ts +0 -658
  55. package/src/commands/ai-edit.ts +0 -1542
  56. package/src/commands/ai-fill-gaps.ts +0 -566
  57. package/src/commands/ai-helpers.ts +0 -65
  58. package/src/commands/ai-highlights.ts +0 -1303
  59. package/src/commands/ai-image.ts +0 -761
  60. package/src/commands/ai-motion.ts +0 -347
  61. package/src/commands/ai-narrate.ts +0 -451
  62. package/src/commands/ai-review.ts +0 -309
  63. package/src/commands/ai-script-pipeline-cli.ts +0 -1710
  64. package/src/commands/ai-script-pipeline.ts +0 -1365
  65. package/src/commands/ai-suggest-edit.ts +0 -264
  66. package/src/commands/ai-video-fx.ts +0 -445
  67. package/src/commands/ai-video.ts +0 -915
  68. package/src/commands/ai-viral.ts +0 -595
  69. package/src/commands/ai-visual-fx.ts +0 -601
  70. package/src/commands/ai.test.ts +0 -627
  71. package/src/commands/ai.ts +0 -307
  72. package/src/commands/analyze.ts +0 -282
  73. package/src/commands/audio.ts +0 -644
  74. package/src/commands/batch.test.ts +0 -279
  75. package/src/commands/batch.ts +0 -440
  76. package/src/commands/detect.ts +0 -329
  77. package/src/commands/doctor.ts +0 -237
  78. package/src/commands/edit-cmd.ts +0 -1014
  79. package/src/commands/export.ts +0 -918
  80. package/src/commands/generate.ts +0 -2146
  81. package/src/commands/media.ts +0 -177
  82. package/src/commands/output.ts +0 -142
  83. package/src/commands/pipeline.ts +0 -398
  84. package/src/commands/project.test.ts +0 -127
  85. package/src/commands/project.ts +0 -149
  86. package/src/commands/sanitize.ts +0 -60
  87. package/src/commands/schema.ts +0 -130
  88. package/src/commands/setup.ts +0 -509
  89. package/src/commands/timeline.test.ts +0 -499
  90. package/src/commands/timeline.ts +0 -529
  91. package/src/commands/validate.ts +0 -77
  92. package/src/config/config.test.ts +0 -197
  93. package/src/config/index.ts +0 -125
  94. package/src/config/schema.ts +0 -82
  95. package/src/engine/index.ts +0 -2
  96. package/src/engine/project.test.ts +0 -702
  97. package/src/engine/project.ts +0 -439
  98. package/src/index.ts +0 -146
  99. package/src/utils/api-key.test.ts +0 -41
  100. package/src/utils/api-key.ts +0 -247
  101. package/src/utils/audio.ts +0 -83
  102. package/src/utils/exec-safe.ts +0 -75
  103. package/src/utils/first-run.ts +0 -52
  104. package/src/utils/provider-resolver.ts +0 -56
  105. package/src/utils/remotion.ts +0 -951
  106. package/src/utils/subtitle.test.ts +0 -227
  107. package/src/utils/subtitle.ts +0 -169
  108. package/src/utils/tty.ts +0 -196
  109. package/tsconfig.json +0 -20
@@ -1,451 +0,0 @@
1
- /**
2
- * @module ai-narrate
3
- *
4
- * Auto-narration pipeline and provider listing.
5
- *
6
- * CLI commands: narrate, providers
7
- *
8
- * Execute function:
9
- * autoNarrate - Analyze video -> generate script -> TTS voiceover
10
- *
11
- * Extracted from ai.ts as part of modularisation.
12
- * ai.ts calls registerNarrateCommands(aiCommand).
13
- *
14
- * @dependencies Gemini (video analysis), Claude/OpenAI (script generation),
15
- * ElevenLabs (TTS), FFmpeg (duration probe)
16
- */
17
-
18
- import { type Command } from "commander";
19
- import { readFile, writeFile, mkdir } from "node:fs/promises";
20
- import { resolve, dirname, basename } from "node:path";
21
- import { existsSync } from "node:fs";
22
- import chalk from "chalk";
23
- import ora from "ora";
24
- import {
25
- providerRegistry,
26
- whisperProvider,
27
- geminiProvider,
28
- openaiProvider,
29
- claudeProvider,
30
- elevenLabsProvider,
31
- openaiImageProvider,
32
- runwayProvider,
33
- klingProvider,
34
- replicateProvider,
35
- GeminiProvider,
36
- OpenAIProvider,
37
- ClaudeProvider,
38
- ElevenLabsProvider,
39
- } from "@vibeframe/ai-providers";
40
- import { Project, type ProjectFile } from "../engine/index.js";
41
- import { getApiKey } from "../utils/api-key.js";
42
- import { ffprobeDuration } from "../utils/exec-safe.js";
43
- import { getAudioDuration } from "../utils/audio.js";
44
- import { formatTime } from "./ai-helpers.js";
45
-
46
- // ==========================================
47
- // Auto-Narrate Feature Types and Functions
48
- // ==========================================
49
-
50
/**
 * Options for {@link autoNarrate}.
 *
 * Defaults shown below are applied by {@link autoNarrate} itself when a
 * field is omitted.
 */
export interface AutoNarrateOptions {
  /** Path to the video file to analyze and narrate. */
  videoPath: string;
  /** Duration of the video in seconds (used to size the script and pick analysis resolution). */
  duration: number;
  /** Output directory for generated files (audio + script text). */
  outputDir: string;
  /** ElevenLabs voice name or ID (default: "rachel") */
  voice?: string;
  /** Narration style (default: "informative"). */
  style?: "informative" | "energetic" | "calm" | "dramatic";
  /** Language for narration (default: "en") */
  language?: string;
  /** LLM provider for script generation: "claude" (default) or "openai" */
  scriptProvider?: "claude" | "openai";
}
67
-
68
/** Result from {@link autoNarrate}. */
export interface AutoNarrateResult {
  // True when the full analyze -> script -> TTS pipeline succeeded.
  success: boolean;
  /** Path to generated audio file (set only on success). */
  audioPath?: string;
  /** Generated narration script (set only on success). */
  script?: string;
  /** Transcript segments for timeline sync (present when the script provider returns timing). */
  segments?: Array<{
    startTime: number;
    endTime: number;
    text: string;
  }>;
  /** Error message if failed */
  error?: string;
}
84
-
85
- /**
86
- * Generate narration for a video that doesn't have one.
87
- *
88
- * Pipeline:
89
- * 1. Analyze video with Gemini Video Understanding
90
- * 2. Generate narration script with Claude (fallback to OpenAI on 529)
91
- * 3. Convert to speech with ElevenLabs TTS
92
- *
93
- * Saves both the audio file and script text to the output directory.
94
- *
95
- * @param options - Auto-narrate configuration
96
- * @returns Result with audio path, script text, and timed segments
97
- */
98
- export async function autoNarrate(options: AutoNarrateOptions): Promise<AutoNarrateResult> {
99
- const {
100
- videoPath,
101
- duration,
102
- outputDir,
103
- voice = "rachel",
104
- style = "informative",
105
- language = "en",
106
- scriptProvider = "claude",
107
- } = options;
108
-
109
- // Validate API keys
110
- const geminiApiKey = await getApiKey("GOOGLE_API_KEY", "Google");
111
- if (!geminiApiKey) {
112
- return { success: false, error: "GOOGLE_API_KEY required for video analysis. Run 'vibe setup' or set GOOGLE_API_KEY in .env" };
113
- }
114
-
115
- let claudeApiKey: string | null = null;
116
- let openaiScriptApiKey: string | null = null;
117
- if (scriptProvider === "openai") {
118
- openaiScriptApiKey = await getApiKey("OPENAI_API_KEY", "OpenAI");
119
- if (!openaiScriptApiKey) {
120
- return { success: false, error: "OPENAI_API_KEY required for script generation. Run 'vibe setup' or set OPENAI_API_KEY in .env" };
121
- }
122
- } else {
123
- claudeApiKey = await getApiKey("ANTHROPIC_API_KEY", "Anthropic");
124
- if (!claudeApiKey) {
125
- return { success: false, error: "ANTHROPIC_API_KEY required for script generation. Run 'vibe setup' or set ANTHROPIC_API_KEY in .env" };
126
- }
127
- }
128
-
129
- const elevenlabsApiKey = await getApiKey("ELEVENLABS_API_KEY", "ElevenLabs");
130
- if (!elevenlabsApiKey) {
131
- return { success: false, error: "ELEVENLABS_API_KEY required for TTS. Run 'vibe setup' or set ELEVENLABS_API_KEY in .env" };
132
- }
133
-
134
- try {
135
- // Step 1: Analyze video with Gemini
136
- const gemini = new GeminiProvider();
137
- await gemini.initialize({ apiKey: geminiApiKey });
138
-
139
- const videoBuffer = await readFile(videoPath);
140
-
141
- const analysisPrompt = `Analyze this video in detail for narration purposes. Describe:
142
- 1. What is happening visually (actions, movements, subjects)
143
- 2. The setting and environment
144
- 3. Any text or graphics visible
145
- 4. The mood and tone of the content
146
- 5. Key moments and their approximate timestamps
147
-
148
- Provide a detailed description that could be used to write a voiceover narration.
149
- Focus on what viewers need to know to understand and appreciate the video.`;
150
-
151
- const analysisResult = await gemini.analyzeVideo(videoBuffer, analysisPrompt, {
152
- fps: 0.5, // Lower FPS for cost optimization
153
- lowResolution: duration > 60, // Use low res for longer videos
154
- });
155
-
156
- if (!analysisResult.success || !analysisResult.response) {
157
- return { success: false, error: `Video analysis failed: ${analysisResult.error}` };
158
- }
159
-
160
- // Step 2: Generate narration script with Claude or OpenAI
161
- let scriptResult: { success: boolean; script?: string; segments?: Array<{ startTime: number; endTime: number; text: string }>; error?: string };
162
-
163
- if (scriptProvider === "openai") {
164
- const gpt = new OpenAIProvider();
165
- await gpt.initialize({ apiKey: openaiScriptApiKey! });
166
- scriptResult = await gpt.generateNarrationScript(
167
- analysisResult.response,
168
- duration,
169
- style,
170
- language
171
- );
172
- } else {
173
- const claude = new ClaudeProvider();
174
- await claude.initialize({ apiKey: claudeApiKey! });
175
- scriptResult = await claude.generateNarrationScript(
176
- analysisResult.response,
177
- duration,
178
- style,
179
- language
180
- );
181
-
182
- // Auto-fallback to OpenAI on Claude overload (529)
183
- if (!scriptResult.success && scriptResult.error?.includes("529")) {
184
- const fallbackKey = await getApiKey("OPENAI_API_KEY", "OpenAI");
185
- if (fallbackKey) {
186
- console.error("⚠️ Claude overloaded, falling back to OpenAI...");
187
- const gpt = new OpenAIProvider();
188
- await gpt.initialize({ apiKey: fallbackKey });
189
- scriptResult = await gpt.generateNarrationScript(
190
- analysisResult.response,
191
- duration,
192
- style,
193
- language
194
- );
195
- }
196
- }
197
- }
198
-
199
- if (!scriptResult.success || !scriptResult.script) {
200
- return { success: false, error: `Script generation failed: ${scriptResult.error}` };
201
- }
202
-
203
- // Step 3: Convert to speech with ElevenLabs
204
- const elevenlabs = new ElevenLabsProvider();
205
- await elevenlabs.initialize({ apiKey: elevenlabsApiKey });
206
-
207
- const ttsResult = await elevenlabs.textToSpeech(scriptResult.script, {
208
- voiceId: voice,
209
- });
210
-
211
- if (!ttsResult.success || !ttsResult.audioBuffer) {
212
- return { success: false, error: `TTS generation failed: ${ttsResult.error}` };
213
- }
214
-
215
- // Ensure output directory exists
216
- if (!existsSync(outputDir)) {
217
- await mkdir(outputDir, { recursive: true });
218
- }
219
-
220
- // Save audio file
221
- const audioPath = resolve(outputDir, "auto-narration.mp3");
222
- await writeFile(audioPath, ttsResult.audioBuffer);
223
-
224
- // Save script for reference
225
- const scriptPath = resolve(outputDir, "narration-script.txt");
226
- await writeFile(scriptPath, scriptResult.script, "utf-8");
227
-
228
- return {
229
- success: true,
230
- audioPath,
231
- script: scriptResult.script,
232
- segments: scriptResult.segments,
233
- };
234
- } catch (error) {
235
- return {
236
- success: false,
237
- error: error instanceof Error ? error.message : "Unknown error in autoNarrate",
238
- };
239
- }
240
- }
241
-
242
- // ==========================================
243
- // CLI Command Registration
244
- // ==========================================
245
-
246
/**
 * Register the `providers` and `narrate` subcommands on the given `ai`
 * Commander command.
 *
 * Called from ai.ts as part of the CLI modularisation (see module header).
 *
 * @param ai - The parent `ai` command to attach subcommands to.
 */
export function registerNarrateCommands(ai: Command): void {

  // `ai providers` — list every known provider with availability status.
  ai
    .command("providers")
    .description("List available AI providers")
    .action(async () => {
      // Register default providers
      providerRegistry.register(whisperProvider);
      providerRegistry.register(geminiProvider);
      providerRegistry.register(openaiProvider);
      providerRegistry.register(claudeProvider);
      providerRegistry.register(elevenLabsProvider);
      providerRegistry.register(openaiImageProvider);
      providerRegistry.register(runwayProvider);
      providerRegistry.register(klingProvider);
      providerRegistry.register(replicateProvider);

      console.log();
      console.log(chalk.bold.cyan("Available AI Providers"));
      console.log(chalk.dim("─".repeat(60)));

      // One entry per provider: ● available / ○ unavailable.
      const providers = providerRegistry.getAll();
      for (const provider of providers) {
        const status = provider.isAvailable ? chalk.green("●") : chalk.red("○");
        console.log();
        console.log(`${status} ${chalk.bold(provider.name)} ${chalk.dim(`(${provider.id})`)}`);
        console.log(`  ${provider.description}`);
        console.log(`  ${chalk.dim("Capabilities:")} ${provider.capabilities.join(", ")}`);
      }

      console.log();
    });

  // Auto-Narrate command
  // `ai narrate <input>` — accepts either a raw video file or a .vibe.json
  // project; drives the autoNarrate() pipeline above.
  ai
    .command("narrate")
    .description("Generate AI narration for a video file or project")
    .argument("<input>", "Video file or project file (.vibe.json)")
    .option("-o, --output <dir>", "Output directory for generated files", ".")
    .option("-v, --voice <name>", "ElevenLabs voice name (rachel, adam, josh, etc.)", "rachel")
    .option("-s, --style <style>", "Narration style: informative, energetic, calm, dramatic", "informative")
    .option("-l, --language <lang>", "Language code (e.g., en, ko)", "en")
    .option("-p, --provider <name>", "LLM for script generation: claude (default), openai", "claude")
    .option("--add-to-project", "Add narration to project (only for .vibe.json input)")
    .action(async (inputPath: string, options) => {
      try {
        const absPath = resolve(process.cwd(), inputPath);
        if (!existsSync(absPath)) {
          console.error(chalk.red(`File not found: ${absPath}`));
          process.exit(1);
        }

        console.log();
        console.log(chalk.bold.cyan("🎙️ Auto-Narrate Pipeline"));
        console.log(chalk.dim("─".repeat(60)));
        console.log();

        // Project input: resolve the first video source inside the project.
        const isProject = inputPath.endsWith(".vibe.json");
        let videoPath: string;
        let project: Project | null = null;
        let outputDir = resolve(process.cwd(), options.output);

        if (isProject) {
          // Load project to find video source
          const content = await readFile(absPath, "utf-8");
          const data: ProjectFile = JSON.parse(content);
          project = Project.fromJSON(data);
          const sources = project.getSources();
          const videoSource = sources.find((s) => s.type === "video");

          if (!videoSource) {
            console.error(chalk.red("No video source found in project"));
            process.exit(1);
          }

          // Source URLs are stored relative to the project file.
          videoPath = resolve(dirname(absPath), videoSource.url);
          if (!existsSync(videoPath)) {
            console.error(chalk.red(`Video file not found: ${videoPath}`));
            process.exit(1);
          }

          // Use project directory as output if not specified
          if (options.output === ".") {
            outputDir = dirname(absPath);
          }

          console.log(`📁 Project: ${chalk.bold(project.getMeta().name)}`);
        } else {
          videoPath = absPath;
          console.log(`🎬 Video: ${chalk.bold(basename(videoPath))}`);
        }

        // Get video duration (needed to size the script and analysis settings).
        const durationSpinner = ora("📊 Analyzing video...").start();
        let duration: number;
        try {
          duration = await ffprobeDuration(videoPath);
          durationSpinner.succeed(chalk.green(`Duration: ${formatTime(duration)}`));
        } catch {
          durationSpinner.fail(chalk.red("Failed to get video duration"));
          process.exit(1);
        }

        // Validate style option (commander does not enforce the enum itself).
        const validStyles = ["informative", "energetic", "calm", "dramatic"];
        if (!validStyles.includes(options.style)) {
          console.error(chalk.red(`Invalid style: ${options.style}`));
          console.error(chalk.dim(`Valid styles: ${validStyles.join(", ")}`));
          process.exit(1);
        }

        // Generate narration via the autoNarrate() pipeline.
        const generateSpinner = ora("🤖 Generating narration...").start();

        generateSpinner.text = "📹 Analyzing video with Gemini...";
        const result = await autoNarrate({
          videoPath,
          duration,
          outputDir,
          voice: options.voice,
          style: options.style as "informative" | "energetic" | "calm" | "dramatic",
          language: options.language,
          // NOTE(review): options.provider is cast without validation; an
          // unknown value falls through to the claude branch in autoNarrate.
          scriptProvider: options.provider as "claude" | "openai",
        });

        if (!result.success) {
          generateSpinner.fail(chalk.red(`Failed: ${result.error}`));
          process.exit(1);
        }

        generateSpinner.succeed(chalk.green("Narration generated successfully"));

        // Display result
        console.log();
        console.log(chalk.bold.cyan("Generated Files"));
        console.log(chalk.dim("─".repeat(60)));
        console.log(`  🎵 Audio: ${chalk.green(result.audioPath)}`);
        console.log(`  📝 Script: ${chalk.green(resolve(outputDir, "narration-script.txt"))}`);

        // Preview at most the first five timed segments.
        if (result.segments && result.segments.length > 0) {
          console.log();
          console.log(chalk.bold.cyan("Narration Segments"));
          console.log(chalk.dim("─".repeat(60)));
          for (const seg of result.segments.slice(0, 5)) {
            console.log(`  [${formatTime(seg.startTime)} - ${formatTime(seg.endTime)}] ${chalk.dim(seg.text.substring(0, 50))}${seg.text.length > 50 ? "..." : ""}`);
          }
          if (result.segments.length > 5) {
            console.log(chalk.dim(`  ... and ${result.segments.length - 5} more segments`));
          }
        }

        // Add to project if requested (only meaningful for .vibe.json input).
        if (options.addToProject && project && isProject) {
          const addSpinner = ora("Adding narration to project...").start();

          // Get audio duration
          let audioDuration: number;
          try {
            audioDuration = await getAudioDuration(result.audioPath!);
          } catch {
            audioDuration = duration; // Fallback to video duration
          }

          // Add audio source
          const audioSource = project.addSource({
            name: "Auto-generated narration",
            url: basename(result.audioPath!),
            type: "audio",
            duration: audioDuration,
          });

          // Add audio clip to audio track (clamped to the video's length).
          const audioTrack = project.getTracks().find((t) => t.type === "audio");
          if (audioTrack) {
            project.addClip({
              sourceId: audioSource.id,
              trackId: audioTrack.id,
              startTime: 0,
              duration: Math.min(audioDuration, duration),
              sourceStartOffset: 0,
              sourceEndOffset: Math.min(audioDuration, duration),
            });
          }

          // Save updated project
          await writeFile(absPath, JSON.stringify(project.toJSON(), null, 2), "utf-8");
          addSpinner.succeed(chalk.green("Narration added to project"));
        }

        console.log();
        console.log(chalk.bold.green("✅ Auto-narrate complete!"));

        if (!options.addToProject && isProject) {
          console.log();
          console.log(chalk.dim("Tip: Use --add-to-project to automatically add the narration to your project"));
        }

        console.log();
      } catch (error) {
        console.error(chalk.red("Auto-narrate failed"));
        console.error(error);
        process.exit(1);
      }
    });

} // end registerNarrateCommands