@hustle-together/api-dev-tools 2.0.6 → 3.0.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (37)
  1. package/README.md +283 -478
  2. package/bin/cli.js +55 -11
  3. package/commands/README.md +124 -251
  4. package/commands/api-create.md +318 -136
  5. package/commands/api-interview.md +252 -256
  6. package/commands/api-research.md +209 -234
  7. package/commands/api-verify.md +231 -0
  8. package/demo/audio/generate-all-narrations.js +516 -0
  9. package/demo/audio/generate-voice-previews.js +140 -0
  10. package/demo/audio/narration-adam-timing.json +3666 -0
  11. package/demo/audio/narration-adam.mp3 +0 -0
  12. package/demo/audio/narration-creature-timing.json +3666 -0
  13. package/demo/audio/narration-creature.mp3 +0 -0
  14. package/demo/audio/narration-gaming-timing.json +3666 -0
  15. package/demo/audio/narration-gaming.mp3 +0 -0
  16. package/demo/audio/narration-hope-timing.json +3666 -0
  17. package/demo/audio/narration-hope.mp3 +0 -0
  18. package/demo/audio/narration-mark-timing.json +3666 -0
  19. package/demo/audio/narration-mark.mp3 +0 -0
  20. package/demo/audio/previews/manifest.json +30 -0
  21. package/demo/audio/previews/preview-creature.mp3 +0 -0
  22. package/demo/audio/previews/preview-gaming.mp3 +0 -0
  23. package/demo/audio/previews/preview-hope.mp3 +0 -0
  24. package/demo/audio/previews/preview-mark.mp3 +0 -0
  25. package/demo/audio/voices-manifest.json +50 -0
  26. package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +30 -28
  27. package/demo/hustle-together/blog/interview-driven-api-development.html +37 -23
  28. package/demo/hustle-together/index.html +142 -109
  29. package/demo/workflow-demo.html +1054 -544
  30. package/hooks/periodic-reground.py +154 -0
  31. package/hooks/session-startup.py +151 -0
  32. package/hooks/track-tool-use.py +109 -17
  33. package/hooks/verify-after-green.py +152 -0
  34. package/package.json +2 -2
  35. package/templates/api-dev-state.json +42 -7
  36. package/templates/research-index.json +6 -0
  37. package/templates/settings.json +23 -0

package/demo/audio/generate-all-narrations.js
@@ -0,0 +1,516 @@
#!/usr/bin/env node
/**
 * Generate full narration audio with word-level timestamps for ALL voices
 *
 * Usage: ELEVENLABS_API_KEY=your_key node generate-all-narrations.js
 *
 * Output:
 * - narration-adam.mp3 + narration-adam-timing.json
 * - narration-mark.mp3 + narration-mark-timing.json
 * - narration-hope.mp3 + narration-hope-timing.json
 * - narration-creature.mp3 + narration-creature-timing.json
 * - narration-gaming.mp3 + narration-gaming-timing.json
 */

const fs = require('fs');
const path = require('path');

// ElevenLabs API configuration
const API_BASE = 'https://api.elevenlabs.io/v1';
const MODEL_ID = 'eleven_turbo_v2_5'; // Fast, high-quality model

// Voice configurations - All available voices
const VOICES = [
  {
    id: 'pNInz6obpgDQGcFmaJgB',
    name: 'adam',
    displayName: 'Adam',
    description: 'Deep, professional'
  },
  {
    id: 'UgBBYS2sOqTuMpoF3BR0',
    name: 'mark',
    displayName: 'Mark',
    description: 'Warm, conversational'
  },
  {
    id: 'tnSpp4vdxKPjI9w0GnoV',
    name: 'hope',
    displayName: 'Hope',
    description: 'Bright, energetic'
  },
  {
    id: 'Z7RrOqZFTyLpIlzCgfsp',
    name: 'creature',
    displayName: 'Creature',
    description: 'Unique, character voice'
  },
  {
    id: 'YOq2y2Up4RgXP2HyXjE5',
    name: 'gaming',
    displayName: 'Gaming',
    description: 'Dynamic, enthusiastic'
  }
];

// The narration script with section markers and highlight triggers
const NARRATION_SCRIPT = `
[SECTION:intro]
Welcome to Hustle API Dev Tools.

[HIGHLIGHT:#hustleBrand]
This package enforces a structured workflow for AI-assisted API development.

[HIGHLIGHT:[data-phase="research"]]
First, you research. No assumptions. No training data. Real documentation.

[HIGHLIGHT:[data-phase="interview"]]
Then you interview. The AI asks YOU questions with structured options based on what it learned.

[HIGHLIGHT:[data-phase="test"]]
Next, you write tests first. Red, green, refactor. No implementation without a failing test.

[HIGHLIGHT:[data-phase="code"]]
Only then do you write code. Minimal. Just enough to pass the tests.

[HIGHLIGHT:[data-phase="docs"]]
Finally, documentation. Every endpoint documented with real examples and schemas.

The philosophy is simple: Hustle together. Share resources. Build stronger.

[SECTION:problems]
[HIGHLIGHT:#problems h2]
Let's talk about the problem. What goes wrong when AI builds APIs without structure?

[HIGHLIGHT:.gap-item:nth-child(1)]
Gap one: AI doesn't use your exact words. You say Vercel AI Gateway but it searches for Vercel AI SDK. Wrong library. Wrong documentation. Wrong code.

[HIGHLIGHT:.gap-item:nth-child(2)]
Gap two: AI claims files are updated without proof. It says I've updated the file but there's no git diff. No verification. You're trusting on faith.

[HIGHLIGHT:.gap-item:nth-child(3)]
Gap three: Skipped tests are accepted. The AI runs tests, some fail, and it moves on. We can fix those later. Those later fixes never come.

[HIGHLIGHT:.gap-item:nth-child(4)]
Gap four: Tasks marked complete without verification. The AI says Done but the feature doesn't work. No one actually checked.

[HIGHLIGHT:.gap-item:nth-child(5)]
Gap five: Environment variable mismatch. Tests pass locally but fail in production. The AI used different values than what's actually deployed.

These gaps compound. One wrong assumption leads to another. By the time you notice, you've built on a broken foundation.

[SECTION:solution]
[HIGHLIGHT:#solution h2]
The solution is enforcement. Python hooks that intercept every tool call.

[HIGHLIGHT:.solution-grid .solution-card:nth-child(1)]
PreToolUse hooks run before Claude can write or edit any file. They check: Did you research first? Did you interview the user? Did you write a failing test?

[HIGHLIGHT:.solution-grid .solution-card:nth-child(2)]
PostToolUse hooks run after research and interviews. They track what was learned. They log every query. They build a paper trail.

[HIGHLIGHT:.solution-grid .solution-card:nth-child(3)]
The Stop hook runs when Claude tries to mark a task complete. It checks: Are all phases done? Did tests pass? Is documentation updated? If not, blocked.

This isn't about limiting AI. It's about holding it to the same standards we hold ourselves.

[SECTION:workflow]
[HIGHLIGHT:#workflow h2]
The workflow has ten phases. Let's walk through each one.

[HIGHLIGHT:.phase-box[data-phase='1']]
Phase one: Scope. Define what you're building. What's the endpoint? What does it do?

[HIGHLIGHT:.phase-box[data-phase='2']]
Phase two: Initial research. Use Context7 or web search. Find the real documentation. No guessing.

[HIGHLIGHT:.phase-box[data-phase='3']]
Phase three: Interview. Ask the user questions with multiple choice options. What provider? What format? What error handling?

[HIGHLIGHT:.phase-box[data-phase='4']]
Phase four: Deep research. Based on interview answers, research specific APIs and SDKs.

[HIGHLIGHT:.phase-box[data-phase='5']]
Phase five: Schema design. Define request and response schemas with Zod. Types before code.

[HIGHLIGHT:.phase-box[data-phase='6']]
Phase six: Environment setup. Check API keys. Verify environment variables. Test connectivity.

[HIGHLIGHT:.phase-box[data-phase='7']]
Phase seven: Red. Write a failing test. Define what success looks like before writing any implementation.

[HIGHLIGHT:.phase-box[data-phase='8']]
Phase eight: Green. Write minimal code to pass the test. No extra features. No premature optimization.

[HIGHLIGHT:.phase-box[data-phase='9']]
Phase nine: Refactor. Clean up the code. Extract utilities. Improve readability. Tests stay green.

[HIGHLIGHT:.phase-box[data-phase='10']]
Phase ten: Documentation. Update OpenAPI spec. Add to test manifest. Include real examples.

Only when all ten phases are complete can the workflow finish.

[SECTION:installation]
[HIGHLIGHT:#installation h2]
Installation takes one command.

[HIGHLIGHT:.install-command]
Run npx @hustle-together/api-dev-tools. That's it.

The CLI copies slash commands to your .claude/commands folder. Red, green, refactor, cycle, and the API development commands.

It copies Python hooks to .claude/hooks. These are the enforcers. The gatekeepers.

It merges settings into your settings.json. Hooks are registered. Permissions are configured.

And it offers to add the Context7 MCP server for live documentation lookup.

Your project is now enforced. Every API you build follows the workflow.

[SECTION:credits]
[HIGHLIGHT:#credits h2]
This project builds on the work of others.

The TDD workflow commands are based on @wbern/claude-instructions by William Bernmalm. The red, green, refactor pattern that makes AI development rigorous.

The interview methodology is inspired by Anthropic's Interviewer approach. Structured discovery before implementation.

And Context7 provides live documentation lookup. Current docs, not stale training data.

Thank you to the Claude Code community. Together, we're making AI development better.

[SECTION:outro]
[HIGHLIGHT:#intro]
Hustle API Dev Tools. Research first. Interview second. Test before code. Document everything.

Build together. Share resources. Grow stronger.

Install it now with npx @hustle-together/api-dev-tools.
`.trim();
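
// Annotation (not part of the package source): the [SECTION:id] and
// [HIGHLIGHT:selector] tags above never reach the TTS API. extractPlainText()
// strips them before synthesis, parseMarkers() records where each one sat in
// the plain text, and matchMarkersToTimestamps() later pins each marker to a
// spoken-word timestamp.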

/**
 * Extract plain text from the narration script (remove markers)
 */
function extractPlainText(script) {
  return script
    .replace(/\[SECTION:[^\]]+\]/g, '')
    // Greedy match: highlight selectors may themselves contain "]"
    // (e.g. [data-phase="research"]), so match to the last "]" on the line.
    .replace(/\[HIGHLIGHT:.+\]/g, '')
    .replace(/\n{3,}/g, '\n\n')
    .trim();
}
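
// Illustrative example with made-up input (annotation, not package source):
//   extractPlainText('[SECTION:intro]\nWelcome.\n\n[HIGHLIGHT:#x]\nHello.')
//   returns 'Welcome.\n\nHello.'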

/**
 * Parse the script to extract section and highlight markers with their positions
 */
function parseMarkers(script) {
  const markers = [];
  const lines = script.split('\n');
  let charPosition = 0;
  let currentSection = 'intro';

  for (const line of lines) {
    // Check for section marker
    const sectionMatch = line.match(/\[SECTION:([^\]]+)\]/);
    if (sectionMatch) {
      currentSection = sectionMatch[1];
      markers.push({
        type: 'section',
        id: currentSection,
        charPosition
      });
    }

    // Check for highlight marker (greedy so selectors containing "]",
    // like .phase-box[data-phase='1'], are captured whole)
    const highlightMatch = line.match(/\[HIGHLIGHT:(.+)\]/);
    if (highlightMatch) {
      markers.push({
        type: 'highlight',
        selector: highlightMatch[1],
        section: currentSection,
        charPosition
      });
    }

    // Update char position (for plain text, not including markers)
    const plainLine = line
      .replace(/\[SECTION:[^\]]+\]/g, '')
      .replace(/\[HIGHLIGHT:.+\]/g, '');
    if (plainLine.trim()) {
      charPosition += plainLine.length + 1; // +1 for newline
    }
  }

  return markers;
}
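
// Illustrative example (annotation, not package source): for the script above,
// the first two markers come out as
//   { type: 'section', id: 'intro', charPosition: 0 }
//   { type: 'highlight', selector: '#hustleBrand', section: 'intro', charPosition: 33 }
// i.e. the first highlight sits 33 characters into the plain narration text,
// just past "Welcome to Hustle API Dev Tools." plus its newline.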

/**
 * Convert character-level timestamps to word-level timestamps
 */
function characterToWordTimestamps(alignment) {
  const words = [];
  let currentWord = '';
  let wordStart = null;
  let wordEnd = null;

  for (let i = 0; i < alignment.characters.length; i++) {
    const char = alignment.characters[i];
    const startTime = alignment.character_start_times_seconds[i];
    const endTime = alignment.character_end_times_seconds[i];

    if (char === ' ' || char === '\n') {
      if (currentWord) {
        words.push({
          word: currentWord,
          start: wordStart,
          end: wordEnd,
          charIndex: i - currentWord.length
        });
        currentWord = '';
        wordStart = null;
        wordEnd = null;
      }
    } else {
      if (wordStart === null) {
        wordStart = startTime;
      }
      wordEnd = endTime;
      currentWord += char;
    }
  }

  // Don't forget the last word
  if (currentWord) {
    words.push({
      word: currentWord,
      start: wordStart,
      end: wordEnd,
      charIndex: alignment.characters.length - currentWord.length
    });
  }

  return words;
}
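
// Illustrative example with made-up timings (annotation, not package source).
// Given an alignment of
//   { characters: ['H','i',' ','y','o'],
//     character_start_times_seconds: [0.0, 0.1, 0.2, 0.3, 0.4],
//     character_end_times_seconds:   [0.1, 0.2, 0.3, 0.4, 0.5] }
// this returns
//   [ { word: 'Hi', start: 0.0, end: 0.2, charIndex: 0 },
//     { word: 'yo', start: 0.3, end: 0.5, charIndex: 3 } ]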

/**
 * Match markers to timestamps based on text position
 */
function matchMarkersToTimestamps(markers, wordTimestamps, plainText) {
  const timedMarkers = [];

  for (const marker of markers) {
    // Find the word closest to this marker's position
    let closestWord = wordTimestamps[0];
    let minDiff = Infinity;

    for (const word of wordTimestamps) {
      const diff = Math.abs(word.charIndex - marker.charPosition);
      if (diff < minDiff) {
        minDiff = diff;
        closestWord = word;
      }
    }

    timedMarkers.push({
      ...marker,
      timestamp: closestWord ? closestWord.start : 0
    });
  }

  return timedMarkers;
}
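
// Note (annotation, not package source): the nearest-word search is what keeps
// marker positions honest. parseMarkers() counts one newline per spoken line,
// while the plain text sent to the API keeps blank lines between paragraphs,
// so charPosition drifts slightly from the alignment's charIndex; snapping to
// the closest word absorbs that drift. The plainText parameter is accepted but
// currently unused.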

/**
 * Call ElevenLabs API to generate speech with timestamps
 */
async function generateSpeech(text, voiceId, apiKey) {
  const url = `${API_BASE}/text-to-speech/${voiceId}/with-timestamps`;

  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'xi-api-key': apiKey,
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      text,
      model_id: MODEL_ID,
      voice_settings: {
        stability: 0.5,
        similarity_boost: 0.75,
        style: 0.3,
        use_speaker_boost: true
      }
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`ElevenLabs API error: ${response.status} - ${error}`);
  }

  return response.json();
}
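
// Annotation (shape as consumed by generateForVoice below; relies on the
// global fetch available in Node 18+; timing values are illustrative): the
// with-timestamps endpoint resolves to JSON along the lines of
//   {
//     audio_base64: '<base64-encoded MP3>',
//     alignment: {
//       characters: ['W', 'e', 'l', ...],
//       character_start_times_seconds: [0.0, 0.05, ...],
//       character_end_times_seconds: [0.05, 0.1, ...]
//     }
//   }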

/**
 * Generate narration for a single voice
 */
async function generateForVoice(voice, plainText, markers, apiKey, outputDir) {
  console.log(`\n=== Generating narration for ${voice.displayName} ===`);
  console.log(`Voice ID: ${voice.id}`);

  const audioPath = path.join(outputDir, `narration-${voice.name}.mp3`);
  const timingPath = path.join(outputDir, `narration-${voice.name}-timing.json`);

  try {
    // Generate speech with timestamps
    const result = await generateSpeech(plainText, voice.id, apiKey);

    console.log(`Audio generated successfully!`);

    // Decode and save audio
    const audioBuffer = Buffer.from(result.audio_base64, 'base64');
    fs.writeFileSync(audioPath, audioBuffer);
    console.log(`Audio saved: ${audioPath}`);
    console.log(`Audio size: ${(audioBuffer.length / 1024 / 1024).toFixed(2)} MB`);

    // Convert character timestamps to word timestamps
    const wordTimestamps = characterToWordTimestamps(result.alignment);
    console.log(`Extracted ${wordTimestamps.length} word timestamps`);

    // Match markers to timestamps
    const timedMarkers = matchMarkersToTimestamps(markers, wordTimestamps, plainText);

    // Calculate duration
    const lastWord = wordTimestamps[wordTimestamps.length - 1];
    const duration = lastWord ? lastWord.end : 0;

    // Build timing data
    const timingData = {
      generated: new Date().toISOString(),
      voice: voice.displayName,
      voiceId: voice.id,
      duration,
      wordCount: wordTimestamps.length,
      sections: [],
      highlights: [],
      words: wordTimestamps.map(w => ({
        word: w.word,
        start: w.start,
        end: w.end
      }))
    };

    // Separate sections and highlights
    for (const marker of timedMarkers) {
      if (marker.type === 'section') {
        timingData.sections.push({
          id: marker.id,
          timestamp: marker.timestamp
        });
      } else if (marker.type === 'highlight') {
        timingData.highlights.push({
          selector: marker.selector,
          section: marker.section,
          timestamp: marker.timestamp
        });
      }
    }

    // Save timing data
    fs.writeFileSync(timingPath, JSON.stringify(timingData, null, 2));
    console.log(`Timing saved: ${timingPath}`);

    console.log(`Duration: ${duration.toFixed(1)} seconds`);
    console.log(`Sections: ${timingData.sections.length}`);
    console.log(`Highlights: ${timingData.highlights.length}`);

    return {
      voice: voice.displayName,
      name: voice.name,
      audioFile: `narration-${voice.name}.mp3`,
      timingFile: `narration-${voice.name}-timing.json`,
      duration,
      wordCount: wordTimestamps.length,
      audioSize: audioBuffer.length
    };

  } catch (error) {
    console.error(`Error generating for ${voice.displayName}: ${error.message}`);
    return null;
  }
}

/**
 * Main function
 */
async function main() {
  const apiKey = process.env.ELEVENLABS_API_KEY;

  if (!apiKey) {
    console.error('Error: ELEVENLABS_API_KEY environment variable is required');
    console.error('Usage: ELEVENLABS_API_KEY=your_key node generate-all-narrations.js');
    process.exit(1);
  }

  // Parse command line args for specific voice
  const args = process.argv.slice(2);
  const specificVoice = args[0];

  const outputDir = __dirname;

  // Extract plain text for TTS
  const plainText = extractPlainText(NARRATION_SCRIPT);
  console.log('Plain text extracted:', plainText.substring(0, 100) + '...');
  console.log(`Total characters: ${plainText.length}`);

  // Parse markers from script
  const markers = parseMarkers(NARRATION_SCRIPT);
  console.log(`Found ${markers.length} markers`);

  // Filter voices if specific one requested
  let voicesToGenerate = VOICES;
  if (specificVoice) {
    voicesToGenerate = VOICES.filter(v =>
      v.name.toLowerCase() === specificVoice.toLowerCase() ||
      v.displayName.toLowerCase() === specificVoice.toLowerCase()
    );
    if (voicesToGenerate.length === 0) {
      console.error(`Voice "${specificVoice}" not found. Available: ${VOICES.map(v => v.name).join(', ')}`);
      process.exit(1);
    }
  }

  console.log(`\nGenerating ${voicesToGenerate.length} voice narration(s)...`);

  const results = [];

  for (const voice of voicesToGenerate) {
    const result = await generateForVoice(voice, plainText, markers, apiKey, outputDir);
    if (result) {
      results.push(result);
    }

    // Add delay between API calls to avoid rate limiting
    if (voicesToGenerate.indexOf(voice) < voicesToGenerate.length - 1) {
      console.log('\nWaiting 5 seconds before next voice...');
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  }

  // Write manifest
  const manifestPath = path.join(outputDir, 'voices-manifest.json');
  fs.writeFileSync(manifestPath, JSON.stringify({
    generated: new Date().toISOString(),
    voices: results
  }, null, 2));

  console.log('\n=== SUMMARY ===');
  console.log(`Generated ${results.length}/${voicesToGenerate.length} voice narrations`);
  console.log(`Manifest: ${manifestPath}`);

  for (const result of results) {
    console.log(`  ${result.voice}: ${result.duration.toFixed(1)}s, ${(result.audioSize / 1024 / 1024).toFixed(2)} MB`);
  }
}

main();
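
The generator takes an optional voice name as its first CLI argument, matched case-insensitively against each entry's name or displayName, so one narration can be regenerated without re-synthesizing all five:

    ELEVENLABS_API_KEY=your_key node generate-all-narrations.js hope

A full run produces the narration MP3/timing pairs and voices-manifest.json (files 10-19 and 25 in the list above). Note that voices-manifest.json is rewritten with only the voices from the current run, so a single-voice run leaves it listing just that voice.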

package/demo/audio/generate-voice-previews.js
@@ -0,0 +1,140 @@
#!/usr/bin/env node
/**
 * Generate voice preview clips with different ElevenLabs voices
 *
 * Usage: ELEVENLABS_API_KEY=your_key node generate-voice-previews.js
 *
 * Output: Creates preview MP3 files in ./previews/ directory
 */

const fs = require('fs');
const path = require('path');

// ElevenLabs API configuration
const API_BASE = 'https://api.elevenlabs.io/v1';
const MODEL_ID = 'eleven_turbo_v2_5'; // Fast, high-quality model

// Voice configurations
const VOICES = [
  {
    id: 'UgBBYS2sOqTuMpoF3BR0',
    name: 'mark',
    displayName: 'Mark'
  },
  {
    id: 'tnSpp4vdxKPjI9w0GnoV',
    name: 'hope',
    displayName: 'Hope'
  },
  {
    id: 'Z7RrOqZFTyLpIlzCgfsp',
    name: 'creature',
    displayName: 'Creature'
  },
  {
    id: 'YOq2y2Up4RgXP2HyXjE5',
    name: 'gaming',
    displayName: 'Gaming'
  }
];

// Preview text - short snippet that showcases the voice
const PREVIEW_TEXT = `Welcome to Hustle API Dev Tools. Build APIs the right way. Research first. Interview second. Test before code. Document everything.`;

/**
 * Generate speech for a single voice
 */
async function generateVoicePreview(voice, text, apiKey) {
  const url = `${API_BASE}/text-to-speech/${voice.id}`;

  console.log(`Generating preview for ${voice.displayName}...`);

  const response = await fetch(url, {
    method: 'POST',
    headers: {
      'xi-api-key': apiKey,
      'Content-Type': 'application/json',
      'Accept': 'audio/mpeg'
    },
    body: JSON.stringify({
      text,
      model_id: MODEL_ID,
      voice_settings: {
        stability: 0.5,
        similarity_boost: 0.75,
        style: 0.3,
        use_speaker_boost: true
      }
    })
  });

  if (!response.ok) {
    const error = await response.text();
    throw new Error(`ElevenLabs API error for ${voice.name}: ${response.status} - ${error}`);
  }

  // Get audio as buffer
  const arrayBuffer = await response.arrayBuffer();
  return Buffer.from(arrayBuffer);
}
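
// Annotation (not package source): unlike generate-all-narrations.js, this
// calls the plain /text-to-speech/{voiceId} endpoint, which returns raw MPEG
// bytes rather than a JSON envelope -- hence response.arrayBuffer() here
// instead of response.json(), and no word timing data for previews.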

/**
 * Main function
 */
async function main() {
  const apiKey = process.env.ELEVENLABS_API_KEY;

  if (!apiKey) {
    console.error('Error: ELEVENLABS_API_KEY environment variable is required');
    console.error('Usage: ELEVENLABS_API_KEY=your_key node generate-voice-previews.js');
    process.exit(1);
  }

  const outputDir = path.join(__dirname, 'previews');

  // Ensure output directory exists
  if (!fs.existsSync(outputDir)) {
    fs.mkdirSync(outputDir, { recursive: true });
  }

  console.log('Generating voice previews...');
  console.log(`Text: "${PREVIEW_TEXT}"`);
  console.log('');

  const results = [];

  for (const voice of VOICES) {
    try {
      const audioBuffer = await generateVoicePreview(voice, PREVIEW_TEXT, apiKey);
      const outputPath = path.join(outputDir, `preview-${voice.name}.mp3`);
      fs.writeFileSync(outputPath, audioBuffer);

      const sizeMB = (audioBuffer.length / 1024 / 1024).toFixed(2);
      console.log(`  ✓ ${voice.displayName}: ${outputPath} (${sizeMB} MB)`);

      results.push({
        voice: voice.displayName,
        id: voice.id,
        file: `preview-${voice.name}.mp3`,
        size: audioBuffer.length
      });
    } catch (error) {
      console.error(`  ✗ ${voice.displayName}: ${error.message}`);
    }
  }

  // Write manifest
  const manifestPath = path.join(outputDir, 'manifest.json');
  fs.writeFileSync(manifestPath, JSON.stringify({
    generated: new Date().toISOString(),
    text: PREVIEW_TEXT,
    voices: results
  }, null, 2));

  console.log('');
  console.log(`=== Done ===`);
  console.log(`Generated ${results.length}/${VOICES.length} voice previews`);
  console.log(`Manifest: ${manifestPath}`);
}

main();
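
Run it the same way, with no arguments:

    ELEVENLABS_API_KEY=your_key node generate-voice-previews.js

A successful run leaves preview-mark.mp3, preview-hope.mp3, preview-creature.mp3, preview-gaming.mp3, and manifest.json under demo/audio/previews/ (files 20-24 in the list above). The preview VOICES list omits Adam, which matches the absence of a preview-adam.mp3 from the diff.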