@hustle-together/api-dev-tools 3.3.0 → 3.6.0

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in the public registry.
Files changed (45)
  1. package/README.md +712 -377
  2. package/commands/api-create.md +68 -23
  3. package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +1 -1
  4. package/demo/hustle-together/blog/interview-driven-api-development.html +1 -1
  5. package/demo/hustle-together/blog/tdd-for-ai.html +1 -1
  6. package/demo/hustle-together/index.html +2 -2
  7. package/demo/workflow-demo-v3.5-backup.html +5008 -0
  8. package/demo/workflow-demo.html +5137 -3805
  9. package/hooks/enforce-deep-research.py +6 -1
  10. package/hooks/enforce-disambiguation.py +7 -1
  11. package/hooks/enforce-documentation.py +6 -1
  12. package/hooks/enforce-environment.py +5 -1
  13. package/hooks/enforce-interview.py +5 -1
  14. package/hooks/enforce-refactor.py +3 -1
  15. package/hooks/enforce-schema.py +0 -0
  16. package/hooks/enforce-scope.py +5 -1
  17. package/hooks/enforce-tdd-red.py +5 -1
  18. package/hooks/enforce-verify.py +0 -0
  19. package/hooks/track-tool-use.py +167 -0
  20. package/hooks/verify-implementation.py +0 -0
  21. package/package.json +1 -1
  22. package/templates/api-dev-state.json +24 -0
  23. package/demo/audio/audio-sync.js +0 -295
  24. package/demo/audio/generate-all-narrations.js +0 -581
  25. package/demo/audio/generate-narration.js +0 -486
  26. package/demo/audio/generate-voice-previews.js +0 -140
  27. package/demo/audio/narration-adam-timing.json +0 -4675
  28. package/demo/audio/narration-adam.mp3 +0 -0
  29. package/demo/audio/narration-creature-timing.json +0 -4675
  30. package/demo/audio/narration-creature.mp3 +0 -0
  31. package/demo/audio/narration-gaming-timing.json +0 -4675
  32. package/demo/audio/narration-gaming.mp3 +0 -0
  33. package/demo/audio/narration-hope-timing.json +0 -4675
  34. package/demo/audio/narration-hope.mp3 +0 -0
  35. package/demo/audio/narration-mark-timing.json +0 -4675
  36. package/demo/audio/narration-mark.mp3 +0 -0
  37. package/demo/audio/narration-timing.json +0 -3614
  38. package/demo/audio/narration-timing.sample.json +0 -48
  39. package/demo/audio/narration.mp3 +0 -0
  40. package/demo/audio/previews/manifest.json +0 -30
  41. package/demo/audio/previews/preview-creature.mp3 +0 -0
  42. package/demo/audio/previews/preview-gaming.mp3 +0 -0
  43. package/demo/audio/previews/preview-hope.mp3 +0 -0
  44. package/demo/audio/previews/preview-mark.mp3 +0 -0
  45. package/demo/audio/voices-manifest.json +0 -50
package/demo/audio/generate-all-narrations.js
@@ -1,581 +0,0 @@
- #!/usr/bin/env node
- /**
-  * Generate full narration audio with word-level timestamps for ALL voices
-  *
-  * Usage: ELEVENLABS_API_KEY=your_key node generate-all-narrations.js
-  *
-  * Output:
-  * - narration-adam.mp3 + narration-adam-timing.json
-  * - narration-mark.mp3 + narration-mark-timing.json
-  * - narration-hope.mp3 + narration-hope-timing.json
-  * - narration-creature.mp3 + narration-creature-timing.json
-  * - narration-gaming.mp3 + narration-gaming-timing.json
-  */
-
- const fs = require('fs');
- const path = require('path');
-
- // ElevenLabs API configuration
- const API_BASE = 'https://api.elevenlabs.io/v1';
- const MODEL_ID = 'eleven_turbo_v2_5'; // Fast, high-quality model
-
- // Voice configurations - All available voices
- const VOICES = [
-   {
-     id: 'pNInz6obpgDQGcFmaJgB',
-     name: 'adam',
-     displayName: 'Adam',
-     description: 'Deep, professional'
-   },
-   {
-     id: 'UgBBYS2sOqTuMpoF3BR0',
-     name: 'mark',
-     displayName: 'Mark',
-     description: 'Warm, conversational'
-   },
-   {
-     id: 'tnSpp4vdxKPjI9w0GnoV',
-     name: 'hope',
-     displayName: 'Hope',
-     description: 'Bright, energetic'
-   },
-   {
-     id: 'Z7RrOqZFTyLpIlzCgfsp',
-     name: 'creature',
-     displayName: 'Creature',
-     description: 'Unique, character voice'
-   },
-   {
-     id: 'YOq2y2Up4RgXP2HyXjE5',
-     name: 'gaming',
-     displayName: 'Gaming',
-     description: 'Dynamic, enthusiastic'
-   }
- ];
-
- // The narration script with section markers and highlight triggers
- // Format: [SECTION:id] marks a new section, [HIGHLIGHT:element-selector] marks what to highlight
- const NARRATION_SCRIPT = `
- [SECTION:intro]
- Welcome to Hustle API Dev Tools, version three point oh.
-
- [HIGHLIGHT:#hustleBrand]
- This package enforces a structured, twelve-phase workflow for AI-assisted API development.
-
- [HIGHLIGHT:[data-phase="research"]]
- Research first. No assumptions. No training data. Real documentation from Context7 and web search.
-
- [HIGHLIGHT:[data-phase="interview"]]
- Interview second. The AI asks YOU questions with structured options based on what it actually found.
-
- [HIGHLIGHT:[data-phase="test"]]
- Test before code. Red, green, refactor. No implementation without a failing test.
-
- [HIGHLIGHT:[data-phase="docs"]]
- Document everything. Every endpoint documented with real examples and schemas.
-
- The philosophy is simple: Hustle together. Share resources. Build stronger.
-
- [SECTION:problems]
- [HIGHLIGHT:#problems h2]
- Let's talk about the problem. What goes wrong when AI builds APIs without structure?
-
- [HIGHLIGHT:.gap-item:nth-child(1)]
- Gap one: AI doesn't use your exact words. You say Brandfetch API but it searches for something else. Wrong library. Wrong documentation.
-
- [HIGHLIGHT:.gap-item:nth-child(2)]
- Gap two: Generic questions. Without research first, the AI asks template questions instead of specific ones based on actual API capabilities.
-
- [HIGHLIGHT:.gap-item:nth-child(3)]
- Gap three: Memory-based implementation. After research, the AI forgets what it learned and implements from training data instead.
-
- [HIGHLIGHT:.gap-item:nth-child(4)]
- Gap four: No verification after tests pass. The AI writes code that passes tests but doesn't match the actual documentation.
-
- [HIGHLIGHT:.gap-item:nth-child(5)]
- Gap five: Context dilution. After many turns, the AI forgets project structure, documentation locations, and workflow requirements.
-
- These gaps compound. Version three solves all of them with loop-back architecture and continuous re-grounding.
-
- [SECTION:solution]
- [HIGHLIGHT:#solution h2]
- The solution is enforcement. Python hooks that intercept every tool call.
-
- [HIGHLIGHT:.hook-box:nth-child(1)]
- PreToolUse hooks run before Claude writes any file. They inject interview decisions as reminders and block writes without research.
-
- [HIGHLIGHT:.hook-box:nth-child(2)]
- PostToolUse hooks track tool usage and trigger verification. After tests pass, they force Phase nine: re-read the documentation.
-
- [HIGHLIGHT:.hook-box:nth-child(3)]
- The Stop hook blocks completion if any phase is incomplete. No more premature "done" declarations.
-
- [HIGHLIGHT:.hook-box:nth-child(4)]
- SessionStart and periodic hooks re-inject context every seven turns to prevent dilution in long sessions.
-
- This isn't about limiting AI. It's about holding it to the same standards we hold ourselves.
-
- [SECTION:phases]
- [HIGHLIGHT:#phases h2]
- The workflow now has twelve phases. Two new ones in version three.
-
- [HIGHLIGHT:[data-phase="0"]]
- Phase zero: Disambiguation. When you say Vercel AI, do you mean the SDK or the Gateway? We clarify before researching.
-
- [HIGHLIGHT:[data-phase="1"]]
- Phase one: Scope. Confirm we understand what you want to build.
-
- [HIGHLIGHT:[data-phase="2"]]
- Phase two: Initial research. Context7 and web search. Find the real documentation.
-
- [HIGHLIGHT:[data-phase="3"]]
- Phase three: Interview. Questions generated FROM research findings. Not generic templates.
-
- [HIGHLIGHT:[data-phase="4"]]
- Phase four: Deep research. Based on your interview answers, we propose targeted follow-up searches. Adaptive, not shotgun.
-
- [HIGHLIGHT:[data-phase="5"]]
- Phase five: Schema. Define Zod schemas based on research plus interview decisions.
-
- [HIGHLIGHT:[data-phase="6"]]
- Phase six: Environment. Verify API keys exist before writing code.
-
- [HIGHLIGHT:[data-phase="7"]]
- Phase seven: TDD Red. Write failing tests. Define success before implementation.
-
- [HIGHLIGHT:[data-phase="8"]]
- Phase eight: TDD Green. Minimal code to pass tests. Interview decisions injected by hooks.
-
- [HIGHLIGHT:[data-phase="9"]]
- Phase nine: Verify. This is new. Re-read the original documentation and compare to implementation. Find gaps. Loop back if needed.
-
- [HIGHLIGHT:[data-phase="10"]]
- Phase ten: Refactor. Clean up code while tests stay green.
-
- [HIGHLIGHT:[data-phase="11"]]
- Phase eleven: Documentation. Update OpenAPI spec and test manifest.
-
- [HIGHLIGHT:[data-phase="12"]]
- Phase twelve: Complete. Final verification by the Stop hook.
-
- Every phase can loop back. If verification finds gaps, we return to Red and write tests for missing features.
-
- [SECTION:demo]
- [HIGHLIGHT:#demo h2]
- Let's watch a real example. Creating a Brandfetch API endpoint.
-
- [HIGHLIGHT:[data-step="0"]]
- The user types /api-create brandfetch. The twelve-phase workflow begins.
-
- [HIGHLIGHT:[data-step="1"]]
- Claude confirms scope. We're building an endpoint to fetch brand assets by domain.
-
- [HIGHLIGHT:[data-step="2"]]
- Initial research. Claude uses Context7 to find the SDK documentation. WebSearch finds rate limits and response formats.
-
- [HIGHLIGHT:[data-step="3"]]
- Interview begins. But notice: the questions are specific to what Claude actually found. What's the primary purpose? Options come from the documentation.
-
- [HIGHLIGHT:[data-step="4"]]
- User selects: Full brand kit with logos, colors, and fonts.
-
- [HIGHLIGHT:[data-step="5"]]
- More questions. How should API keys be handled? User selects server environment variables only.
-
- [HIGHLIGHT:[data-step="7"]]
- Deep research. Based on your selections, Claude proposes specific searches for the full brand response format.
-
- [HIGHLIGHT:[data-step="8"]]
- Schema created. Zod types based on research plus interview decisions.
-
- [HIGHLIGHT:[data-step="9"]]
- Environment check. The hook verifies BRANDFETCH_API_KEY exists.
-
- [HIGHLIGHT:[data-step="10"]]
- TDD Red. Claude writes twelve failing test cases.
-
- [HIGHLIGHT:[data-step="11"]]
- TDD Green. Implementation begins. Watch the hook inject interview decisions.
-
- [HIGHLIGHT:[data-step="12"]]
- The hook reminds Claude: Remember user decisions. Purpose: full brand kit. API key handling: server only.
-
- [HIGHLIGHT:[data-step="13"]]
- Phase nine: Verify. Claude re-reads the Brandfetch documentation and compares to implementation. All features accounted for.
-
- [HIGHLIGHT:[data-step="14"]]
- Refactor. Code cleaned up. Tests still pass.
-
- [HIGHLIGHT:[data-step="15"]]
- Documentation updated. API test manifest and OpenAPI spec.
-
- [HIGHLIGHT:[data-step="16"]]
- Complete. All twelve phases verified. Four files created. Twelve tests passing.
-
- [SECTION:installation]
- [HIGHLIGHT:#installation h2]
- Installation takes one command.
-
- [HIGHLIGHT:.install-command]
- Run npx @hustle-together/api-dev-tools. That's it.
-
- The CLI copies slash commands, Python hooks, and settings. It creates the research cache folder and updates your CLAUDE.md with workflow documentation.
-
- Version three adds automatic CLAUDE.md updates so Claude understands the workflow in your project.
-
- Your project is now enforced. Every API follows the twelve-phase workflow.
-
- [SECTION:credits]
- [HIGHLIGHT:#credits h2]
- This project builds on the work of others.
-
- The TDD workflow is based on @wbern/claude-instructions by William Bernmalm.
-
- Context7 provides live documentation lookup. Current docs, not stale training data.
-
- And the interview methodology ensures questions come from research, not templates.
-
- Thank you to the Claude Code community. Together, we're making AI development better.
-
- [SECTION:outro]
- [HIGHLIGHT:#intro]
- Hustle API Dev Tools version three. Twelve phases. Loop-back architecture. Continuous verification.
-
- Research first. Questions FROM findings. Verify after green. Document always.
-
- Install it now with npx @hustle-together/api-dev-tools.
- `.trim();
-
- /**
-  * Extract plain text from the narration script (remove markers)
-  */
- function extractPlainText(script) {
-   return script
-     .replace(/\[SECTION:[^\]]+\]/g, '')
-     .replace(/\[HIGHLIGHT:[^\]]+\]/g, '')
-     .replace(/\n{3,}/g, '\n\n')
-     .trim();
- }
-
- /**
-  * Parse the script to extract section and highlight markers with their positions
-  */
- function parseMarkers(script) {
-   const markers = [];
-   const lines = script.split('\n');
-   let charPosition = 0;
-   let currentSection = 'intro';
-
-   for (const line of lines) {
-     // Check for section marker
-     const sectionMatch = line.match(/\[SECTION:([^\]]+)\]/);
-     if (sectionMatch) {
-       currentSection = sectionMatch[1];
-       markers.push({
-         type: 'section',
-         id: currentSection,
-         charPosition
-       });
-     }
-
-     // Check for highlight marker
-     // Use a more robust regex that handles nested brackets like [data-phase="0"]
-     const highlightMatch = line.match(/\[HIGHLIGHT:(.+?)\]$/);
-     if (highlightMatch) {
-       // Extract the selector - handle double brackets for attribute selectors
-       let selector = highlightMatch[1];
-       // If selector starts with [ but doesn't end with ], add the closing bracket
-       if (selector.startsWith('[') && !selector.endsWith(']')) {
-         selector = selector + ']';
-       }
-       markers.push({
-         type: 'highlight',
-         selector: selector,
-         section: currentSection,
-         charPosition
-       });
-     }
-
-     // Update char position (for plain text, not including markers)
-     const plainLine = line
-       .replace(/\[SECTION:[^\]]+\]/g, '')
-       .replace(/\[HIGHLIGHT:[^\]]+\]/g, '');
-     if (plainLine.trim()) {
-       charPosition += plainLine.length + 1; // +1 for newline
-     }
-   }
-
-   return markers;
- }
-
- /**
-  * Convert character-level timestamps to word-level timestamps
-  */
- function characterToWordTimestamps(alignment) {
-   const words = [];
-   let currentWord = '';
-   let wordStart = null;
-   let wordEnd = null;
-
-   for (let i = 0; i < alignment.characters.length; i++) {
-     const char = alignment.characters[i];
-     const startTime = alignment.character_start_times_seconds[i];
-     const endTime = alignment.character_end_times_seconds[i];
-
-     if (char === ' ' || char === '\n') {
-       if (currentWord) {
-         words.push({
-           word: currentWord,
-           start: wordStart,
-           end: wordEnd,
-           charIndex: i - currentWord.length
-         });
-         currentWord = '';
-         wordStart = null;
-         wordEnd = null;
-       }
-     } else {
-       if (wordStart === null) {
-         wordStart = startTime;
-       }
-       wordEnd = endTime;
-       currentWord += char;
-     }
-   }
-
-   // Don't forget the last word
-   if (currentWord) {
-     words.push({
-       word: currentWord,
-       start: wordStart,
-       end: wordEnd,
-       charIndex: alignment.characters.length - currentWord.length
-     });
-   }
-
-   return words;
- }
-
- /**
-  * Match markers to timestamps based on text position
-  */
- function matchMarkersToTimestamps(markers, wordTimestamps, plainText) {
-   const timedMarkers = [];
-
-   for (const marker of markers) {
-     // Find the word closest to this marker's position
-     let closestWord = wordTimestamps[0];
-     let minDiff = Infinity;
-
-     for (const word of wordTimestamps) {
-       const diff = Math.abs(word.charIndex - marker.charPosition);
-       if (diff < minDiff) {
-         minDiff = diff;
-         closestWord = word;
-       }
-     }
-
-     timedMarkers.push({
-       ...marker,
-       timestamp: closestWord ? closestWord.start : 0
-     });
-   }
-
-   return timedMarkers;
- }
-
- /**
-  * Call ElevenLabs API to generate speech with timestamps
-  */
- async function generateSpeech(text, voiceId, apiKey) {
-   const url = `${API_BASE}/text-to-speech/${voiceId}/with-timestamps`;
-
-   const response = await fetch(url, {
-     method: 'POST',
-     headers: {
-       'xi-api-key': apiKey,
-       'Content-Type': 'application/json'
-     },
-     body: JSON.stringify({
-       text,
-       model_id: MODEL_ID,
-       voice_settings: {
-         stability: 0.5,
-         similarity_boost: 0.75,
-         style: 0.3,
-         use_speaker_boost: true
-       }
-     })
-   });
-
-   if (!response.ok) {
-     const error = await response.text();
-     throw new Error(`ElevenLabs API error: ${response.status} - ${error}`);
-   }
-
-   return response.json();
- }
-
- /**
-  * Generate narration for a single voice
-  */
- async function generateForVoice(voice, plainText, markers, apiKey, outputDir) {
-   console.log(`\n=== Generating narration for ${voice.displayName} ===`);
-   console.log(`Voice ID: ${voice.id}`);
-
-   const audioPath = path.join(outputDir, `narration-${voice.name}.mp3`);
-   const timingPath = path.join(outputDir, `narration-${voice.name}-timing.json`);
-
-   try {
-     // Generate speech with timestamps
-     const result = await generateSpeech(plainText, voice.id, apiKey);
-
-     console.log(`Audio generated successfully!`);
-
-     // Decode and save audio
-     const audioBuffer = Buffer.from(result.audio_base64, 'base64');
-     fs.writeFileSync(audioPath, audioBuffer);
-     console.log(`Audio saved: ${audioPath}`);
-     console.log(`Audio size: ${(audioBuffer.length / 1024 / 1024).toFixed(2)} MB`);
-
-     // Convert character timestamps to word timestamps
-     const wordTimestamps = characterToWordTimestamps(result.alignment);
-     console.log(`Extracted ${wordTimestamps.length} word timestamps`);
-
-     // Match markers to timestamps
-     const timedMarkers = matchMarkersToTimestamps(markers, wordTimestamps, plainText);
-
-     // Calculate duration
-     const lastWord = wordTimestamps[wordTimestamps.length - 1];
-     const duration = lastWord ? lastWord.end : 0;
-
-     // Build timing data
-     const timingData = {
-       generated: new Date().toISOString(),
-       voice: voice.displayName,
-       voiceId: voice.id,
-       duration,
-       wordCount: wordTimestamps.length,
-       sections: [],
-       highlights: [],
-       words: wordTimestamps.map(w => ({
-         word: w.word,
-         start: w.start,
-         end: w.end
-       }))
-     };
-
-     // Separate sections and highlights
-     for (const marker of timedMarkers) {
-       if (marker.type === 'section') {
-         timingData.sections.push({
-           id: marker.id,
-           timestamp: marker.timestamp
-         });
-       } else if (marker.type === 'highlight') {
-         timingData.highlights.push({
-           selector: marker.selector,
-           section: marker.section,
-           timestamp: marker.timestamp
-         });
-       }
-     }
-
-     // Save timing data
-     fs.writeFileSync(timingPath, JSON.stringify(timingData, null, 2));
-     console.log(`Timing saved: ${timingPath}`);
-
-     console.log(`Duration: ${duration.toFixed(1)} seconds`);
-     console.log(`Sections: ${timingData.sections.length}`);
-     console.log(`Highlights: ${timingData.highlights.length}`);
-
-     return {
-       voice: voice.displayName,
-       name: voice.name,
-       audioFile: `narration-${voice.name}.mp3`,
-       timingFile: `narration-${voice.name}-timing.json`,
-       duration,
-       wordCount: wordTimestamps.length,
-       audioSize: audioBuffer.length
-     };
-
-   } catch (error) {
-     console.error(`Error generating for ${voice.displayName}: ${error.message}`);
-     return null;
-   }
- }
-
- /**
-  * Main function
-  */
- async function main() {
-   const apiKey = process.env.ELEVENLABS_API_KEY;
-
-   if (!apiKey) {
-     console.error('Error: ELEVENLABS_API_KEY environment variable is required');
-     console.error('Usage: ELEVENLABS_API_KEY=your_key node generate-all-narrations.js');
-     process.exit(1);
-   }
-
-   // Parse command line args for specific voice
-   const args = process.argv.slice(2);
-   const specificVoice = args[0];
-
-   const outputDir = __dirname;
-
-   // Extract plain text for TTS
-   const plainText = extractPlainText(NARRATION_SCRIPT);
-   console.log('Plain text extracted:', plainText.substring(0, 100) + '...');
-   console.log(`Total characters: ${plainText.length}`);
-
-   // Parse markers from script
-   const markers = parseMarkers(NARRATION_SCRIPT);
-   console.log(`Found ${markers.length} markers`);
-
-   // Filter voices if specific one requested
-   let voicesToGenerate = VOICES;
-   if (specificVoice) {
-     voicesToGenerate = VOICES.filter(v =>
-       v.name.toLowerCase() === specificVoice.toLowerCase() ||
-       v.displayName.toLowerCase() === specificVoice.toLowerCase()
-     );
-     if (voicesToGenerate.length === 0) {
-       console.error(`Voice "${specificVoice}" not found. Available: ${VOICES.map(v => v.name).join(', ')}`);
-       process.exit(1);
-     }
-   }
-
-   console.log(`\nGenerating ${voicesToGenerate.length} voice narration(s)...`);
-
-   const results = [];
-
-   for (const voice of voicesToGenerate) {
-     const result = await generateForVoice(voice, plainText, markers, apiKey, outputDir);
-     if (result) {
-       results.push(result);
-     }
-
-     // Add delay between API calls to avoid rate limiting
-     if (voicesToGenerate.indexOf(voice) < voicesToGenerate.length - 1) {
-       console.log('\nWaiting 5 seconds before next voice...');
-       await new Promise(resolve => setTimeout(resolve, 5000));
-     }
-   }
-
-   // Write manifest
-   const manifestPath = path.join(outputDir, 'voices-manifest.json');
-   fs.writeFileSync(manifestPath, JSON.stringify({
-     generated: new Date().toISOString(),
-     voices: results
-   }, null, 2));
-
-   console.log('\n=== SUMMARY ===');
-   console.log(`Generated ${results.length}/${voicesToGenerate.length} voice narrations`);
-   console.log(`Manifest: ${manifestPath}`);
-
-   for (const result of results) {
-     console.log(` ${result.voice}: ${result.duration.toFixed(1)}s, ${(result.audioSize / 1024 / 1024).toFixed(2)} MB`);
-   }
- }
-
- main();