@tpitre/story-ui 4.16.6 → 4.16.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1 +1 @@
1
- {"version":3,"file":"VoiceCanvas.d.ts","sourceRoot":"","sources":["../../../../templates/StoryUI/voice/VoiceCanvas.tsx"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG;AACH,OAAO,KAAwE,MAAM,OAAO,CAAC;AAY7F,MAAM,WAAW,gBAAgB;IAC/B,OAAO,EAAE,MAAM,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,oEAAoE;IACpE,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,yEAAyE;IACzE,MAAM,CAAC,EAAE,CAAC,MAAM,EAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,IAAI,EAAE,MAAM,CAAC;QAAC,KAAK,EAAE,MAAM,CAAA;KAAE,KAAK,IAAI,CAAC;IAC7E,OAAO,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,CAAC;CACnC;AAED,8EAA8E;AAC9E,MAAM,WAAW,iBAAiB;IAChC,4EAA4E;IAC5E,KAAK,EAAE,MAAM,IAAI,CAAC;CACnB;AAID,eAAO,MAAM,WAAW,4FA6uBtB,CAAC"}
1
+ {"version":3,"file":"VoiceCanvas.d.ts","sourceRoot":"","sources":["../../../../templates/StoryUI/voice/VoiceCanvas.tsx"],"names":[],"mappings":"AAAA;;;;;;;;;;;;;;GAcG;AACH,OAAO,KAAwE,MAAM,OAAO,CAAC;AAY7F,MAAM,WAAW,gBAAgB;IAC/B,OAAO,EAAE,MAAM,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,oEAAoE;IACpE,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,yEAAyE;IACzE,MAAM,CAAC,EAAE,CAAC,MAAM,EAAE;QAAE,QAAQ,EAAE,MAAM,CAAC;QAAC,IAAI,EAAE,MAAM,CAAC;QAAC,KAAK,EAAE,MAAM,CAAA;KAAE,KAAK,IAAI,CAAC;IAC7E,OAAO,CAAC,EAAE,CAAC,KAAK,EAAE,MAAM,KAAK,IAAI,CAAC;CACnC;AAED,8EAA8E;AAC9E,MAAM,WAAW,iBAAiB;IAChC,4EAA4E;IAC5E,KAAK,EAAE,MAAM,IAAI,CAAC;CACnB;AAID,eAAO,MAAM,WAAW,4FA+wBtB,CAAC"}
@@ -59,6 +59,7 @@ export const VoiceCanvas = React.forwardRef(function VoiceCanvas({ apiBase, prov
59
59
  const audioCheckRef = useRef(null);
60
60
  const audioStreamRef = useRef(null);
61
61
  const stopListeningRef = useRef(() => { });
62
+ const startListeningRef = useRef(() => { });
62
63
  const currentCodeRef = useRef(currentCode);
63
64
  currentCodeRef.current = currentCode;
64
65
  // Incremented on every new generation to prevent stale finally blocks from
@@ -85,11 +86,30 @@ export const VoiceCanvas = React.forwardRef(function VoiceCanvas({ apiBase, prov
85
86
  }, []);
86
87
  // ── Generate / Edit ───────────────────────────────────────────
87
88
  const sendCanvasRequest = useCallback(async (transcript) => {
89
+ // Reject prompts that are too short to produce useful output.
90
+ // Fragments like "create a" or "please" result in broken code.
91
+ const wordCount = transcript.trim().split(/\s+/).length;
92
+ if (wordCount < 3) {
93
+ setErrorMessage('Say a bit more — describe what you want to build.');
94
+ return;
95
+ }
88
96
  if (abortRef.current)
89
97
  abortRef.current.abort();
90
98
  // Stamp this generation so stale finally blocks from aborted requests
91
99
  // don't clobber the state of a newer in-flight request.
92
100
  const genId = ++generationCounterRef.current;
101
+ // Pause voice recognition while the LLM is thinking so the user can
102
+ // talk freely without triggering new requests or aborting this one.
103
+ // The mic resumes automatically when generation completes.
104
+ const wasListening = isListeningRef.current;
105
+ if (wasListening && recognitionRef.current) {
106
+ try {
107
+ recognitionRef.current.stop();
108
+ }
109
+ catch { /* already stopped */ }
110
+ recognitionRef.current = null;
111
+ // Keep isListeningRef.current = true so we know to resume later
112
+ }
93
113
  setIsGenerating(true);
94
114
  setStatusText('Thinking...');
95
115
  setErrorMessage('');
@@ -179,6 +199,15 @@ export const VoiceCanvas = React.forwardRef(function VoiceCanvas({ apiBase, prov
179
199
  if (generationCounterRef.current === genId) {
180
200
  setIsGenerating(false);
181
201
  abortRef.current = null;
202
+ // Resume voice recognition if it was active before generation started.
203
+ // This lets the user keep talking hands-free across multiple edits.
204
+ if (wasListening && isListeningRef.current) {
205
+ setTimeout(() => {
206
+ if (isListeningRef.current && !recognitionRef.current) {
207
+ startListeningRef.current();
208
+ }
209
+ }, 300);
210
+ }
182
211
  }
183
212
  }
184
213
  }, [apiBase, provider, model, storyReady, sendCodeToIframe, onError]);
@@ -297,15 +326,18 @@ export const VoiceCanvas = React.forwardRef(function VoiceCanvas({ apiBase, prov
297
326
  clearTimeout(autoSubmitRef.current);
298
327
  autoSubmitRef.current = setTimeout(() => {
299
328
  const prompt = transcript.trim();
300
- if (prompt) {
301
- // Clear the pending transcript BEFORE sending so that stopListening
302
- // (if pressed moments later) doesn't fire a duplicate request.
329
+ // Require at least 3 words to avoid sending fragments like "create a"
330
+ // or "please" that produce bad LLM output. Short pauses mid-thought
331
+ // are common in natural speech; the longer delay (3s) gives the user
332
+ // time to continue before auto-submitting.
333
+ const wordCount = prompt.split(/\s+/).length;
334
+ if (prompt && wordCount >= 3) {
303
335
  pendingTranscriptRef.current = '';
304
336
  setPendingTranscript('');
305
337
  sendCanvasRequest(prompt);
306
338
  }
307
339
  autoSubmitRef.current = null;
308
- }, 1200);
340
+ }, 3000);
309
341
  }, [sendCanvasRequest]);
310
342
  // ── Voice: start ───────────────────────────────────────────────
311
343
  const startListening = useCallback(() => {
@@ -456,6 +488,7 @@ export const VoiceCanvas = React.forwardRef(function VoiceCanvas({ apiBase, prov
456
488
  setIsListening(false);
457
489
  }
458
490
  }, [clear, undo, redo, scheduleIntent, saveStory]);
491
+ startListeningRef.current = startListening;
459
492
  // ── Voice: stop ────────────────────────────────────────────────
460
493
  const stopListening = useCallback(() => {
461
494
  isListeningRef.current = false;
@@ -93,6 +93,7 @@ function VoiceCanvas({
93
93
  const audioCheckRef = useRef<ReturnType<typeof setTimeout> | null>(null);
94
94
  const audioStreamRef = useRef<MediaStream | null>(null);
95
95
  const stopListeningRef = useRef<() => void>(() => {});
96
+ const startListeningRef = useRef<() => void>(() => {});
96
97
  const currentCodeRef = useRef(currentCode);
97
98
  currentCodeRef.current = currentCode;
98
99
  // Incremented on every new generation to prevent stale finally blocks from
@@ -123,12 +124,30 @@ function VoiceCanvas({
123
124
  // ── Generate / Edit ───────────────────────────────────────────
124
125
 
125
126
  const sendCanvasRequest = useCallback(async (transcript: string) => {
127
+ // Reject prompts that are too short to produce useful output.
128
+ // Fragments like "create a" or "please" result in broken code.
129
+ const wordCount = transcript.trim().split(/\s+/).length;
130
+ if (wordCount < 3) {
131
+ setErrorMessage('Say a bit more — describe what you want to build.');
132
+ return;
133
+ }
134
+
126
135
  if (abortRef.current) abortRef.current.abort();
127
136
 
128
137
  // Stamp this generation so stale finally blocks from aborted requests
129
138
  // don't clobber the state of a newer in-flight request.
130
139
  const genId = ++generationCounterRef.current;
131
140
 
141
+ // Pause voice recognition while the LLM is thinking so the user can
142
+ // talk freely without triggering new requests or aborting this one.
143
+ // The mic resumes automatically when generation completes.
144
+ const wasListening = isListeningRef.current;
145
+ if (wasListening && recognitionRef.current) {
146
+ try { recognitionRef.current.stop(); } catch { /* already stopped */ }
147
+ recognitionRef.current = null;
148
+ // Keep isListeningRef.current = true so we know to resume later
149
+ }
150
+
132
151
  setIsGenerating(true);
133
152
  setStatusText('Thinking...');
134
153
  setErrorMessage('');
@@ -227,6 +246,16 @@ function VoiceCanvas({
227
246
  if (generationCounterRef.current === genId) {
228
247
  setIsGenerating(false);
229
248
  abortRef.current = null;
249
+
250
+ // Resume voice recognition if it was active before generation started.
251
+ // This lets the user keep talking hands-free across multiple edits.
252
+ if (wasListening && isListeningRef.current) {
253
+ setTimeout(() => {
254
+ if (isListeningRef.current && !recognitionRef.current) {
255
+ startListeningRef.current();
256
+ }
257
+ }, 300);
258
+ }
230
259
  }
231
260
  }
232
261
  }, [apiBase, provider, model, storyReady, sendCodeToIframe, onError]);
@@ -350,15 +379,18 @@ function VoiceCanvas({
350
379
  if (autoSubmitRef.current) clearTimeout(autoSubmitRef.current);
351
380
  autoSubmitRef.current = setTimeout(() => {
352
381
  const prompt = transcript.trim();
353
- if (prompt) {
354
- // Clear the pending transcript BEFORE sending so that stopListening
355
- // (if pressed moments later) doesn't fire a duplicate request.
382
+ // Require at least 3 words to avoid sending fragments like "create a"
383
+ // or "please" that produce bad LLM output. Short pauses mid-thought
384
+ // are common in natural speech; the longer delay (3s) gives the user
385
+ // time to continue before auto-submitting.
386
+ const wordCount = prompt.split(/\s+/).length;
387
+ if (prompt && wordCount >= 3) {
356
388
  pendingTranscriptRef.current = '';
357
389
  setPendingTranscript('');
358
390
  sendCanvasRequest(prompt);
359
391
  }
360
392
  autoSubmitRef.current = null;
361
- }, 1200);
393
+ }, 3000);
362
394
  }, [sendCanvasRequest]);
363
395
 
364
396
  // ── Voice: start ───────────────────────────────────────────────
@@ -495,6 +527,8 @@ function VoiceCanvas({
495
527
  }
496
528
  }, [clear, undo, redo, scheduleIntent, saveStory]);
497
529
 
530
+ startListeningRef.current = startListening;
531
+
498
532
  // ── Voice: stop ────────────────────────────────────────────────
499
533
 
500
534
  const stopListening = useCallback(() => {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@tpitre/story-ui",
3
- "version": "4.16.6",
3
+ "version": "4.16.8",
4
4
  "description": "AI-powered Storybook story generator with dynamic component discovery",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",
@@ -93,6 +93,7 @@ function VoiceCanvas({
93
93
  const audioCheckRef = useRef<ReturnType<typeof setTimeout> | null>(null);
94
94
  const audioStreamRef = useRef<MediaStream | null>(null);
95
95
  const stopListeningRef = useRef<() => void>(() => {});
96
+ const startListeningRef = useRef<() => void>(() => {});
96
97
  const currentCodeRef = useRef(currentCode);
97
98
  currentCodeRef.current = currentCode;
98
99
  // Incremented on every new generation to prevent stale finally blocks from
@@ -123,12 +124,30 @@ function VoiceCanvas({
123
124
  // ── Generate / Edit ───────────────────────────────────────────
124
125
 
125
126
  const sendCanvasRequest = useCallback(async (transcript: string) => {
127
+ // Reject prompts that are too short to produce useful output.
128
+ // Fragments like "create a" or "please" result in broken code.
129
+ const wordCount = transcript.trim().split(/\s+/).length;
130
+ if (wordCount < 3) {
131
+ setErrorMessage('Say a bit more — describe what you want to build.');
132
+ return;
133
+ }
134
+
126
135
  if (abortRef.current) abortRef.current.abort();
127
136
 
128
137
  // Stamp this generation so stale finally blocks from aborted requests
129
138
  // don't clobber the state of a newer in-flight request.
130
139
  const genId = ++generationCounterRef.current;
131
140
 
141
+ // Pause voice recognition while the LLM is thinking so the user can
142
+ // talk freely without triggering new requests or aborting this one.
143
+ // The mic resumes automatically when generation completes.
144
+ const wasListening = isListeningRef.current;
145
+ if (wasListening && recognitionRef.current) {
146
+ try { recognitionRef.current.stop(); } catch { /* already stopped */ }
147
+ recognitionRef.current = null;
148
+ // Keep isListeningRef.current = true so we know to resume later
149
+ }
150
+
132
151
  setIsGenerating(true);
133
152
  setStatusText('Thinking...');
134
153
  setErrorMessage('');
@@ -227,6 +246,16 @@ function VoiceCanvas({
227
246
  if (generationCounterRef.current === genId) {
228
247
  setIsGenerating(false);
229
248
  abortRef.current = null;
249
+
250
+ // Resume voice recognition if it was active before generation started.
251
+ // This lets the user keep talking hands-free across multiple edits.
252
+ if (wasListening && isListeningRef.current) {
253
+ setTimeout(() => {
254
+ if (isListeningRef.current && !recognitionRef.current) {
255
+ startListeningRef.current();
256
+ }
257
+ }, 300);
258
+ }
230
259
  }
231
260
  }
232
261
  }, [apiBase, provider, model, storyReady, sendCodeToIframe, onError]);
@@ -350,15 +379,18 @@ function VoiceCanvas({
350
379
  if (autoSubmitRef.current) clearTimeout(autoSubmitRef.current);
351
380
  autoSubmitRef.current = setTimeout(() => {
352
381
  const prompt = transcript.trim();
353
- if (prompt) {
354
- // Clear the pending transcript BEFORE sending so that stopListening
355
- // (if pressed moments later) doesn't fire a duplicate request.
382
+ // Require at least 3 words to avoid sending fragments like "create a"
383
+ // or "please" that produce bad LLM output. Short pauses mid-thought
384
+ // are common in natural speech the longer delay (3s) gives the user
385
+ // time to continue before auto-submitting.
386
+ const wordCount = prompt.split(/\s+/).length;
387
+ if (prompt && wordCount >= 3) {
356
388
  pendingTranscriptRef.current = '';
357
389
  setPendingTranscript('');
358
390
  sendCanvasRequest(prompt);
359
391
  }
360
392
  autoSubmitRef.current = null;
361
- }, 1200);
393
+ }, 3000);
362
394
  }, [sendCanvasRequest]);
363
395
 
364
396
  // ── Voice: start ───────────────────────────────────────────────
@@ -495,6 +527,8 @@ function VoiceCanvas({
495
527
  }
496
528
  }, [clear, undo, redo, scheduleIntent, saveStory]);
497
529
 
530
+ startListeningRef.current = startListening;
531
+
498
532
  // ── Voice: stop ────────────────────────────────────────────────
499
533
 
500
534
  const stopListening = useCallback(() => {