@hustle-together/api-dev-tools 3.3.0 → 3.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +712 -377
- package/commands/api-create.md +68 -23
- package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +1 -1
- package/demo/hustle-together/blog/interview-driven-api-development.html +1 -1
- package/demo/hustle-together/blog/tdd-for-ai.html +1 -1
- package/demo/hustle-together/index.html +2 -2
- package/demo/workflow-demo-v3.5-backup.html +5008 -0
- package/demo/workflow-demo.html +5137 -3805
- package/hooks/enforce-deep-research.py +6 -1
- package/hooks/enforce-disambiguation.py +7 -1
- package/hooks/enforce-documentation.py +6 -1
- package/hooks/enforce-environment.py +5 -1
- package/hooks/enforce-interview.py +5 -1
- package/hooks/enforce-refactor.py +3 -1
- package/hooks/enforce-schema.py +0 -0
- package/hooks/enforce-scope.py +5 -1
- package/hooks/enforce-tdd-red.py +5 -1
- package/hooks/enforce-verify.py +0 -0
- package/hooks/track-tool-use.py +167 -0
- package/hooks/verify-implementation.py +0 -0
- package/package.json +1 -1
- package/templates/api-dev-state.json +24 -0
- package/demo/audio/audio-sync.js +0 -295
- package/demo/audio/generate-all-narrations.js +0 -581
- package/demo/audio/generate-narration.js +0 -486
- package/demo/audio/generate-voice-previews.js +0 -140
- package/demo/audio/narration-adam-timing.json +0 -4675
- package/demo/audio/narration-adam.mp3 +0 -0
- package/demo/audio/narration-creature-timing.json +0 -4675
- package/demo/audio/narration-creature.mp3 +0 -0
- package/demo/audio/narration-gaming-timing.json +0 -4675
- package/demo/audio/narration-gaming.mp3 +0 -0
- package/demo/audio/narration-hope-timing.json +0 -4675
- package/demo/audio/narration-hope.mp3 +0 -0
- package/demo/audio/narration-mark-timing.json +0 -4675
- package/demo/audio/narration-mark.mp3 +0 -0
- package/demo/audio/narration-timing.json +0 -3614
- package/demo/audio/narration-timing.sample.json +0 -48
- package/demo/audio/narration.mp3 +0 -0
- package/demo/audio/previews/manifest.json +0 -30
- package/demo/audio/previews/preview-creature.mp3 +0 -0
- package/demo/audio/previews/preview-gaming.mp3 +0 -0
- package/demo/audio/previews/preview-hope.mp3 +0 -0
- package/demo/audio/previews/preview-mark.mp3 +0 -0
- package/demo/audio/voices-manifest.json +0 -50
--- package/demo/audio/generate-narration.js
+++ /dev/null
@@ -1,486 +0,0 @@
-#!/usr/bin/env node
-/**
- * Generate narration audio with word-level timestamps using ElevenLabs API
- *
- * Usage: ELEVENLABS_API_KEY=your_key node generate-narration.js
- *
- * Output:
- * - narration.mp3 - The audio file
- * - narration-timing.json - Word timestamps with highlight triggers
- */
-
-const fs = require('fs');
-const path = require('path');
-
-// ElevenLabs API configuration
-const API_BASE = 'https://api.elevenlabs.io/v1';
-const VOICE_ID = 'pNInz6obpgDQGcFmaJgB'; // Adam - deep, professional voice
-const MODEL_ID = 'eleven_turbo_v2_5'; // Fast, high-quality model
-
-// The narration script with section markers and highlight triggers
-// Format: [SECTION:id] marks a new section, [HIGHLIGHT:element-selector] marks what to highlight
-const NARRATION_SCRIPT = `
-[SECTION:intro]
-Welcome to Hustle API Dev Tools, version three point oh.
-
-[HIGHLIGHT:#hustleBrand]
-This package enforces a structured, twelve-phase workflow for AI-assisted API development.
-
-[HIGHLIGHT:[data-phase="research"]]
-Research first. No assumptions. No training data. Real documentation from Context7 and web search.
-
-[HIGHLIGHT:[data-phase="interview"]]
-Interview second. The AI asks YOU questions with structured options based on what it actually found.
-
-[HIGHLIGHT:[data-phase="test"]]
-Test before code. Red, green, refactor. No implementation without a failing test.
-
-[HIGHLIGHT:[data-phase="docs"]]
-Document everything. Every endpoint documented with real examples and schemas.
-
-The philosophy is simple: Hustle together. Share resources. Build stronger.
-
-[SECTION:problems]
-[HIGHLIGHT:#problems h2]
-Let's talk about the problem. What goes wrong when AI builds APIs without structure?
-
-[HIGHLIGHT:.gap-item:nth-child(1)]
-Gap one: AI doesn't use your exact words. You say Brandfetch API but it searches for something else. Wrong library. Wrong documentation.
-
-[HIGHLIGHT:.gap-item:nth-child(2)]
-Gap two: Generic questions. Without research first, the AI asks template questions instead of specific ones based on actual API capabilities.
-
-[HIGHLIGHT:.gap-item:nth-child(3)]
-Gap three: Memory-based implementation. After research, the AI forgets what it learned and implements from training data instead.
-
-[HIGHLIGHT:.gap-item:nth-child(4)]
-Gap four: No verification after tests pass. The AI writes code that passes tests but doesn't match the actual documentation.
-
-[HIGHLIGHT:.gap-item:nth-child(5)]
-Gap five: Context dilution. After many turns, the AI forgets project structure, documentation locations, and workflow requirements.
-
-These gaps compound. Version three solves all of them with loop-back architecture and continuous re-grounding.
-
-[SECTION:solution]
-[HIGHLIGHT:#solution h2]
-The solution is enforcement. Python hooks that intercept every tool call.
-
-[HIGHLIGHT:.hook-box:nth-child(1)]
-PreToolUse hooks run before Claude writes any file. They inject interview decisions as reminders and block writes without research.
-
-[HIGHLIGHT:.hook-box:nth-child(2)]
-PostToolUse hooks track tool usage and trigger verification. After tests pass, they force Phase nine: re-read the documentation.
-
-[HIGHLIGHT:.hook-box:nth-child(3)]
-The Stop hook blocks completion if any phase is incomplete. No more premature "done" declarations.
-
-[HIGHLIGHT:.hook-box:nth-child(4)]
-SessionStart and periodic hooks re-inject context every seven turns to prevent dilution in long sessions.
-
-This isn't about limiting AI. It's about holding it to the same standards we hold ourselves.
-
-[SECTION:phases]
-[HIGHLIGHT:#phases h2]
-The workflow now has twelve phases. Two new ones in version three.
-
-[HIGHLIGHT:[data-phase="0"]]
-Phase zero: Disambiguation. When you say Vercel AI, do you mean the SDK or the Gateway? We clarify before researching.
-
-[HIGHLIGHT:[data-phase="1"]]
-Phase one: Scope. Confirm we understand what you want to build.
-
-[HIGHLIGHT:[data-phase="2"]]
-Phase two: Initial research. Context7 and web search. Find the real documentation.
-
-[HIGHLIGHT:[data-phase="3"]]
-Phase three: Interview. Questions generated FROM research findings. Not generic templates.
-
-[HIGHLIGHT:[data-phase="4"]]
-Phase four: Deep research. Based on your interview answers, we propose targeted follow-up searches. Adaptive, not shotgun.
-
-[HIGHLIGHT:[data-phase="5"]]
-Phase five: Schema. Define Zod schemas based on research plus interview decisions.
-
-[HIGHLIGHT:[data-phase="6"]]
-Phase six: Environment. Verify API keys exist before writing code.
-
-[HIGHLIGHT:[data-phase="7"]]
-Phase seven: TDD Red. Write failing tests. Define success before implementation.
-
-[HIGHLIGHT:[data-phase="8"]]
-Phase eight: TDD Green. Minimal code to pass tests. Interview decisions injected by hooks.
-
-[HIGHLIGHT:[data-phase="9"]]
-Phase nine: Verify. This is new. Re-read the original documentation and compare to implementation. Find gaps. Loop back if needed.
-
-[HIGHLIGHT:[data-phase="10"]]
-Phase ten: Refactor. Clean up code while tests stay green.
-
-[HIGHLIGHT:[data-phase="11"]]
-Phase eleven: Documentation. Update OpenAPI spec and test manifest.
-
-[HIGHLIGHT:[data-phase="12"]]
-Phase twelve: Complete. Final verification by the Stop hook.
-
-Every phase can loop back. If verification finds gaps, we return to Red and write tests for missing features.
-
-[SECTION:demo]
-[HIGHLIGHT:#demo h2]
-Let's watch a real example. Creating a Brandfetch API endpoint.
-
-[HIGHLIGHT:[data-step="0"]]
-The user types /api-create brandfetch. The twelve-phase workflow begins.
-
-[HIGHLIGHT:[data-step="1"]]
-Claude confirms scope. We're building an endpoint to fetch brand assets by domain.
-
-[HIGHLIGHT:[data-step="2"]]
-Initial research. Claude uses Context7 to find the SDK documentation. WebSearch finds rate limits and response formats.
-
-[HIGHLIGHT:[data-step="3"]]
-Interview begins. But notice: the questions are specific to what Claude actually found. What's the primary purpose? Options come from the documentation.
-
-[HIGHLIGHT:[data-step="4"]]
-User selects: Full brand kit with logos, colors, and fonts.
-
-[HIGHLIGHT:[data-step="5"]]
-More questions. How should API keys be handled? User selects server environment variables only.
-
-[HIGHLIGHT:[data-step="7"]]
-Deep research. Based on your selections, Claude proposes specific searches for the full brand response format.
-
-[HIGHLIGHT:[data-step="8"]]
-Schema created. Zod types based on research plus interview decisions.
-
-[HIGHLIGHT:[data-step="9"]]
-Environment check. The hook verifies BRANDFETCH_API_KEY exists.
-
-[HIGHLIGHT:[data-step="10"]]
-TDD Red. Claude writes twelve failing test cases.
-
-[HIGHLIGHT:[data-step="11"]]
-TDD Green. Implementation begins. Watch the hook inject interview decisions.
-
-[HIGHLIGHT:[data-step="12"]]
-The hook reminds Claude: Remember user decisions. Purpose: full brand kit. API key handling: server only.
-
-[HIGHLIGHT:[data-step="13"]]
-Phase nine: Verify. Claude re-reads the Brandfetch documentation and compares to implementation. All features accounted for.
-
-[HIGHLIGHT:[data-step="14"]]
-Refactor. Code cleaned up. Tests still pass.
-
-[HIGHLIGHT:[data-step="15"]]
-Documentation updated. API test manifest and OpenAPI spec.
-
-[HIGHLIGHT:[data-step="16"]]
-Complete. All twelve phases verified. Four files created. Twelve tests passing.
-
-[SECTION:installation]
-[HIGHLIGHT:#installation h2]
-Installation takes one command.
-
-[HIGHLIGHT:.install-command]
-Run npx @hustle-together/api-dev-tools. That's it.
-
-The CLI copies slash commands, Python hooks, and settings. It creates the research cache folder and updates your CLAUDE.md with workflow documentation.
-
-Version three adds automatic CLAUDE.md updates so Claude understands the workflow in your project.
-
-Your project is now enforced. Every API follows the twelve-phase workflow.
-
-[SECTION:credits]
-[HIGHLIGHT:#credits h2]
-This project builds on the work of others.
-
-The TDD workflow is based on @wbern/claude-instructions by William Bernmalm.
-
-Context7 provides live documentation lookup. Current docs, not stale training data.
-
-And the interview methodology ensures questions come from research, not templates.
-
-Thank you to the Claude Code community. Together, we're making AI development better.
-
-[SECTION:outro]
-[HIGHLIGHT:#intro]
-Hustle API Dev Tools version three. Twelve phases. Loop-back architecture. Continuous verification.
-
-Research first. Questions FROM findings. Verify after green. Document always.
-
-Install it now with npx @hustle-together/api-dev-tools.
-`.trim();
-
-/**
- * Extract plain text from the narration script (remove markers)
- */
-function extractPlainText(script) {
-  return script
-    .replace(/\[SECTION:[^\]]+\]/g, '')
-    .replace(/\[HIGHLIGHT:[^\]]+\]/g, '')
-    .replace(/\n{3,}/g, '\n\n')
-    .trim();
-}
-
-/**
- * Parse the script to extract section and highlight markers with their positions
- */
-function parseMarkers(script) {
-  const markers = [];
-  const lines = script.split('\n');
-  let charPosition = 0;
-  let currentSection = 'intro';
-
-  for (const line of lines) {
-    // Check for section marker
-    const sectionMatch = line.match(/\[SECTION:([^\]]+)\]/);
-    if (sectionMatch) {
-      currentSection = sectionMatch[1];
-      markers.push({
-        type: 'section',
-        id: currentSection,
-        charPosition
-      });
-    }
-
-    // Check for highlight marker
-    // Use a more robust regex that handles nested brackets like [data-phase="0"]
-    const highlightMatch = line.match(/\[HIGHLIGHT:(.+?)\]$/);
-    if (highlightMatch) {
-      // Extract the selector - handle double brackets for attribute selectors
-      let selector = highlightMatch[1];
-      // If selector starts with [ but doesn't end with ], add the closing bracket
-      if (selector.startsWith('[') && !selector.endsWith(']')) {
-        selector = selector + ']';
-      }
-      markers.push({
-        type: 'highlight',
-        selector: selector,
-        section: currentSection,
-        charPosition
-      });
-    }
-
-    // Update char position (for plain text, not including markers)
-    const plainLine = line
-      .replace(/\[SECTION:[^\]]+\]/g, '')
-      .replace(/\[HIGHLIGHT:[^\]]+\]/g, '');
-    if (plainLine.trim()) {
-      charPosition += plainLine.length + 1; // +1 for newline
-    }
-  }
-
-  return markers;
-}
-
-/**
- * Convert character-level timestamps to word-level timestamps
- */
-function characterToWordTimestamps(alignment) {
-  const words = [];
-  let currentWord = '';
-  let wordStart = null;
-  let wordEnd = null;
-
-  for (let i = 0; i < alignment.characters.length; i++) {
-    const char = alignment.characters[i];
-    const startTime = alignment.character_start_times_seconds[i];
-    const endTime = alignment.character_end_times_seconds[i];
-
-    if (char === ' ' || char === '\n') {
-      if (currentWord) {
-        words.push({
-          word: currentWord,
-          start: wordStart,
-          end: wordEnd,
-          charIndex: i - currentWord.length
-        });
-        currentWord = '';
-        wordStart = null;
-        wordEnd = null;
-      }
-    } else {
-      if (wordStart === null) {
-        wordStart = startTime;
-      }
-      wordEnd = endTime;
-      currentWord += char;
-    }
-  }
-
-  // Don't forget the last word
-  if (currentWord) {
-    words.push({
-      word: currentWord,
-      start: wordStart,
-      end: wordEnd,
-      charIndex: alignment.characters.length - currentWord.length
-    });
-  }
-
-  return words;
-}
-
-/**
- * Match markers to timestamps based on text position
- */
-function matchMarkersToTimestamps(markers, wordTimestamps, plainText) {
-  const timedMarkers = [];
-
-  for (const marker of markers) {
-    // Find the word closest to this marker's position
-    let closestWord = wordTimestamps[0];
-    let minDiff = Infinity;
-
-    for (const word of wordTimestamps) {
-      const diff = Math.abs(word.charIndex - marker.charPosition);
-      if (diff < minDiff) {
-        minDiff = diff;
-        closestWord = word;
-      }
-    }
-
-    timedMarkers.push({
-      ...marker,
-      timestamp: closestWord ? closestWord.start : 0
-    });
-  }
-
-  return timedMarkers;
-}
-
-/**
- * Call ElevenLabs API to generate speech with timestamps
- */
-async function generateSpeech(text, apiKey) {
-  const url = `${API_BASE}/text-to-speech/${VOICE_ID}/with-timestamps`;
-
-  console.log('Calling ElevenLabs API...');
-  console.log(`Text length: ${text.length} characters`);
-
-  const response = await fetch(url, {
-    method: 'POST',
-    headers: {
-      'xi-api-key': apiKey,
-      'Content-Type': 'application/json'
-    },
-    body: JSON.stringify({
-      text,
-      model_id: MODEL_ID,
-      voice_settings: {
-        stability: 0.5,
-        similarity_boost: 0.75,
-        style: 0.3,
-        use_speaker_boost: true
-      }
-    })
-  });
-
-  if (!response.ok) {
-    const error = await response.text();
-    throw new Error(`ElevenLabs API error: ${response.status} - ${error}`);
-  }
-
-  return response.json();
-}
-
-/**
- * Main function
- */
-async function main() {
-  const apiKey = process.env.ELEVENLABS_API_KEY;
-
-  if (!apiKey) {
-    console.error('Error: ELEVENLABS_API_KEY environment variable is required');
-    console.error('Usage: ELEVENLABS_API_KEY=your_key node generate-narration.js');
-    process.exit(1);
-  }
-
-  const outputDir = __dirname;
-  const audioPath = path.join(outputDir, 'narration.mp3');
-  const timingPath = path.join(outputDir, 'narration-timing.json');
-
-  // Extract plain text for TTS
-  const plainText = extractPlainText(NARRATION_SCRIPT);
-  console.log('Plain text extracted:', plainText.substring(0, 200) + '...');
-
-  // Parse markers from script
-  const markers = parseMarkers(NARRATION_SCRIPT);
-  console.log(`Found ${markers.length} markers`);
-
-  try {
-    // Generate speech with timestamps
-    const result = await generateSpeech(plainText, apiKey);
-
-    console.log('Audio generated successfully!');
-
-    // Decode and save audio
-    const audioBuffer = Buffer.from(result.audio_base64, 'base64');
-    fs.writeFileSync(audioPath, audioBuffer);
-    console.log(`Audio saved to: ${audioPath}`);
-    console.log(`Audio size: ${(audioBuffer.length / 1024 / 1024).toFixed(2)} MB`);
-
-    // Convert character timestamps to word timestamps
-    const wordTimestamps = characterToWordTimestamps(result.alignment);
-    console.log(`Extracted ${wordTimestamps.length} word timestamps`);
-
-    // Match markers to timestamps
-    const timedMarkers = matchMarkersToTimestamps(markers, wordTimestamps, plainText);
-
-    // Calculate duration
-    const lastWord = wordTimestamps[wordTimestamps.length - 1];
-    const duration = lastWord ? lastWord.end : 0;
-
-    // Build timing data
-    const timingData = {
-      generated: new Date().toISOString(),
-      duration,
-      wordCount: wordTimestamps.length,
-      sections: [],
-      highlights: [],
-      words: wordTimestamps.map(w => ({
-        word: w.word,
-        start: w.start,
-        end: w.end
-      }))
-    };
-
-    // Separate sections and highlights
-    for (const marker of timedMarkers) {
-      if (marker.type === 'section') {
-        timingData.sections.push({
-          id: marker.id,
-          timestamp: marker.timestamp
-        });
-      } else if (marker.type === 'highlight') {
-        timingData.highlights.push({
-          selector: marker.selector,
-          section: marker.section,
-          timestamp: marker.timestamp
-        });
-      }
-    }
-
-    // Save timing data
-    fs.writeFileSync(timingPath, JSON.stringify(timingData, null, 2));
-    console.log(`Timing data saved to: ${timingPath}`);
-
-    console.log('\n=== Summary ===');
-    console.log(`Duration: ${duration.toFixed(1)} seconds`);
-    console.log(`Sections: ${timingData.sections.length}`);
-    console.log(`Highlights: ${timingData.highlights.length}`);
-    console.log(`Words: ${timingData.words.length}`);
-
-    console.log('\n=== Section Timestamps ===');
-    for (const section of timingData.sections) {
-      const mins = Math.floor(section.timestamp / 60);
-      const secs = (section.timestamp % 60).toFixed(1);
-      console.log(`  ${section.id}: ${mins}:${secs.padStart(4, '0')}`);
-    }
-
-  } catch (error) {
-    console.error('Error:', error.message);
-    process.exit(1);
-  }
-}
-
-main();
--- package/demo/audio/generate-voice-previews.js
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env node
-/**
- * Generate voice preview clips with different ElevenLabs voices
- *
- * Usage: ELEVENLABS_API_KEY=your_key node generate-voice-previews.js
- *
- * Output: Creates preview MP3 files in ./previews/ directory
- */
-
-const fs = require('fs');
-const path = require('path');
-
-// ElevenLabs API configuration
-const API_BASE = 'https://api.elevenlabs.io/v1';
-const MODEL_ID = 'eleven_turbo_v2_5'; // Fast, high-quality model
-
-// Voice configurations
-const VOICES = [
-  {
-    id: 'UgBBYS2sOqTuMpoF3BR0',
-    name: 'mark',
-    displayName: 'Mark'
-  },
-  {
-    id: 'tnSpp4vdxKPjI9w0GnoV',
-    name: 'hope',
-    displayName: 'Hope'
-  },
-  {
-    id: 'Z7RrOqZFTyLpIlzCgfsp',
-    name: 'creature',
-    displayName: 'Creature'
-  },
-  {
-    id: 'YOq2y2Up4RgXP2HyXjE5',
-    name: 'gaming',
-    displayName: 'Gaming'
-  }
-];
-
-// Preview text - short snippet that showcases the voice
-const PREVIEW_TEXT = `Welcome to Hustle API Dev Tools. Build APIs the right way. Research first. Interview second. Test before code. Document everything.`;
-
-/**
- * Generate speech for a single voice
- */
-async function generateVoicePreview(voice, text, apiKey) {
-  const url = `${API_BASE}/text-to-speech/${voice.id}`;
-
-  console.log(`Generating preview for ${voice.displayName}...`);
-
-  const response = await fetch(url, {
-    method: 'POST',
-    headers: {
-      'xi-api-key': apiKey,
-      'Content-Type': 'application/json',
-      'Accept': 'audio/mpeg'
-    },
-    body: JSON.stringify({
-      text,
-      model_id: MODEL_ID,
-      voice_settings: {
-        stability: 0.5,
-        similarity_boost: 0.75,
-        style: 0.3,
-        use_speaker_boost: true
-      }
-    })
-  });
-
-  if (!response.ok) {
-    const error = await response.text();
-    throw new Error(`ElevenLabs API error for ${voice.name}: ${response.status} - ${error}`);
-  }
-
-  // Get audio as buffer
-  const arrayBuffer = await response.arrayBuffer();
-  return Buffer.from(arrayBuffer);
-}
-
-/**
- * Main function
- */
-async function main() {
-  const apiKey = process.env.ELEVENLABS_API_KEY;
-
-  if (!apiKey) {
-    console.error('Error: ELEVENLABS_API_KEY environment variable is required');
-    console.error('Usage: ELEVENLABS_API_KEY=your_key node generate-voice-previews.js');
-    process.exit(1);
-  }
-
-  const outputDir = path.join(__dirname, 'previews');
-
-  // Ensure output directory exists
-  if (!fs.existsSync(outputDir)) {
-    fs.mkdirSync(outputDir, { recursive: true });
-  }
-
-  console.log('Generating voice previews...');
-  console.log(`Text: "${PREVIEW_TEXT}"`);
-  console.log('');
-
-  const results = [];
-
-  for (const voice of VOICES) {
-    try {
-      const audioBuffer = await generateVoicePreview(voice, PREVIEW_TEXT, apiKey);
-      const outputPath = path.join(outputDir, `preview-${voice.name}.mp3`);
-      fs.writeFileSync(outputPath, audioBuffer);
-
-      const sizeMB = (audioBuffer.length / 1024 / 1024).toFixed(2);
-      console.log(`  ✓ ${voice.displayName}: ${outputPath} (${sizeMB} MB)`);
-
-      results.push({
-        voice: voice.displayName,
-        id: voice.id,
-        file: `preview-${voice.name}.mp3`,
-        size: audioBuffer.length
-      });
-    } catch (error) {
-      console.error(`  ✗ ${voice.displayName}: ${error.message}`);
-    }
-  }
-
-  // Write manifest
-  const manifestPath = path.join(outputDir, 'manifest.json');
-  fs.writeFileSync(manifestPath, JSON.stringify({
-    generated: new Date().toISOString(),
-    text: PREVIEW_TEXT,
-    voices: results
-  }, null, 2));
-
-  console.log('');
-  console.log(`=== Done ===`);
-  console.log(`Generated ${results.length}/${VOICES.length} voice previews`);
-  console.log(`Manifest: ${manifestPath}`);
-}
-
-main();