@contentgrowth/llm-service 0.9.4 → 0.9.5

@@ -161,6 +161,12 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
   const recognitionRef = useRef(null);
   const isSimulatingRef = useRef(false);
   const simulationTimeoutRef = useRef(null);
+  const onResultRef = useRef(onResult);
+  const onEndRef = useRef(onEnd);
+  useEffect(() => {
+    onResultRef.current = onResult;
+    onEndRef.current = onEnd;
+  }, [onResult, onEnd]);
   useEffect(() => {
     if (typeof window !== "undefined") {
       const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
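
The six added lines above are the "latest ref" pattern: each render mirrors the incoming callbacks into refs, so effects and event handlers created once can always read the current callback without listing it as a dependency. A minimal standalone sketch of the pattern (the `useLatest` name is hypothetical, not part of this package):

    import { useEffect, useRef } from "react";

    // Mirror a value into a ref so long-lived closures can read its latest version.
    function useLatest(value) {
      const ref = useRef(value);
      useEffect(() => {
        ref.current = value; // re-pointed after every render in which `value` changed
      }, [value]);
      return ref;
    }
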
@@ -179,7 +185,7 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
           return;
         }
         setIsListening(false);
-        if (onEnd) onEnd();
+        if (onEndRef.current) onEndRef.current();
       };
       recognition.onresult = (event) => {
         let interimTranscript = "";
@@ -188,10 +194,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
           const result = event.results[i];
           if (result.isFinal) {
             finalTranscript += result[0].transcript;
-            if (onResult) onResult(finalTranscript, true);
+            if (onResultRef.current) onResultRef.current(finalTranscript, true);
           } else {
             interimTranscript += result[0].transcript;
-            if (onResult) onResult(interimTranscript, false);
+            if (onResultRef.current) onResultRef.current(interimTranscript, false);
           }
         }
         setTranscript((prev) => prev + finalTranscript);
@@ -206,10 +212,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       simulationTimeoutRef.current = setTimeout(() => {
         const mockText = "This is a simulated voice input for testing.";
         setTranscript((prev) => prev + (prev ? " " : "") + mockText);
-        if (onResult) onResult(mockText, true);
+        if (onResultRef.current) onResultRef.current(mockText, true);
         isSimulatingRef.current = false;
         setIsListening(false);
-        if (onEnd) onEnd();
+        if (onEndRef.current) onEndRef.current();
         simulationTimeoutRef.current = null;
       }, 3e3);
       return;
@@ -226,9 +232,11 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
         clearTimeout(simulationTimeoutRef.current);
         simulationTimeoutRef.current = null;
       }
-      recognitionRef.current.stop();
+      if (recognitionRef.current) {
+        recognitionRef.current.stop();
+      }
     };
-  }, [onResult, onEnd, language]);
+  }, [language]);
   const start = useCallback(() => {
     if (recognitionRef.current && !isListening) {
       try {
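
Two things change in this hunk. First, the effect cleanup now guards `recognitionRef.current` before calling `stop()`: when the Web Speech API is unavailable the hook takes the simulation path and the ref stays null, so the 0.9.4 cleanup could throw a TypeError. Second, with the callbacks read through refs, the dependency array shrinks from `[onResult, onEnd, language]` to `[language]`, so the recognizer is no longer torn down and rebuilt whenever a caller passes freshly-created callbacks. A minimal sketch of the guarded lifecycle (the `useRecognizerLifecycle` name is hypothetical and the body is a reduction, not package code):

    import { useEffect, useRef } from "react";

    function useRecognizerLifecycle(language) {
      const recognitionRef = useRef(null);
      useEffect(() => {
        const Impl = typeof window !== "undefined" &&
          (window.SpeechRecognition || window.webkitSpeechRecognition);
        if (Impl) {
          const recognition = new Impl();
          recognition.lang = language;
          recognitionRef.current = recognition;
        }
        // The cleanup is returned unconditionally, so it must tolerate a null
        // ref (the simulation path, where no recognizer was ever created).
        return () => {
          recognitionRef.current?.stop(); // optional chaining, same effect as the 0.9.5 null check
        };
      }, [language]); // callbacks live in refs, so `language` is the only dependency
      return recognitionRef;
    }
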
@@ -248,10 +256,10 @@ var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
       }
       const mockText = "This is a simulated voice input for testing.";
       setTranscript((prev) => prev + (prev ? " " : "") + mockText);
-      if (onResult) onResult(mockText, true);
+      if (onResultRef.current) onResultRef.current(mockText, true);
       isSimulatingRef.current = false;
       setIsListening(false);
-      if (onEnd) onEnd();
+      if (onEndRef.current) onEndRef.current();
       return;
     }
     if (recognitionRef.current && isListening) {
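
With every call site reading the callbacks through refs, consumers can now pass inline arrow functions without restarting recognition on each render. A hedged usage sketch (the component, its state handling, and the import path are illustrative assumptions; the hook's full return shape is not visible in this diff):

    import { useState } from "react";
    import { useSpeechRecognition } from "@contentgrowth/llm-service"; // import path assumed

    function Dictation() {
      const [finalLines, setFinalLines] = useState([]);
      // These arrows get a new identity on every render. In 0.9.4 they were
      // effect dependencies, so each render rebuilt the recognizer mid-session;
      // in 0.9.5 only a `language` change does.
      const speech = useSpeechRecognition(
        (text, isFinal) => { if (isFinal) setFinalLines((lines) => [...lines, text]); },
        () => console.log("recognition ended"),
        "en-US"
      );
      return null; // rendering omitted; `speech` is treated as opaque in this sketch
    }
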