@contentgrowth/llm-service 0.9.99 → 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/ui/react/components/index.cjs +133 -498
- package/dist/ui/react/components/index.cjs.map +1 -1
- package/dist/ui/react/components/index.d.cts +2 -13
- package/dist/ui/react/components/index.d.ts +2 -13
- package/dist/ui/react/components/index.js +128 -492
- package/dist/ui/react/components/index.js.map +1 -1
- package/package.json +1 -1
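
The bulk of the diff below is the removal of the bundled useSpeechRecognition hook (it also disappears from the module's exports); ChatInputArea and TapToTalk now drive voice capture only through the MediaRecorder-based useAudioRecorder hook, whose getUserMedia call now requests noise suppression and echo cancellation. For orientation only, a minimal TypeScript sketch of that capture pattern follows; captureVoiceNote and the explicit track cleanup are illustrative assumptions, not code shipped in this package:

    // Sketch only: mirrors the getUserMedia audio constraints added in this diff.
    async function captureVoiceNote(onStop: (blob: Blob) => void) {
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: { noiseSuppression: true, echoCancellation: true }
      });
      const recorder = new MediaRecorder(stream);
      const chunks: BlobPart[] = [];
      recorder.ondataavailable = (e) => chunks.push(e.data);
      recorder.onstop = () => {
        stream.getTracks().forEach((t) => t.stop()); // assumption: release the mic when done
        onStop(new Blob(chunks, { type: recorder.mimeType }));
      };
      recorder.start();
      return () => recorder.stop(); // call the returned function to finish recording
    }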
@@ -148,303 +148,19 @@ function ChatHeader({
 }
 
 // src/ui/react/components/ChatInputArea.tsx
-import { useState as
+import { useState as useState2, useRef as useRef2, useImperativeHandle, forwardRef, useEffect as useEffect2, useCallback as useCallback2, useLayoutEffect } from "react";
 import { MicrophoneIcon, StopIcon, PaperAirplaneIcon, XMarkIcon, Square2StackIcon } from "@heroicons/react/24/outline";
 
-// src/ui/react/hooks/useSpeechRecognition.ts
-import { useState, useEffect, useCallback, useRef } from "react";
-var useSpeechRecognition = (onResult, onEnd, language = "en-US") => {
-const [isListening, setIsListening] = useState(false);
-const [transcript, setTranscript] = useState("");
-const [error, setError] = useState(null);
-const [isSupported, setIsSupported] = useState(false);
-const recognitionRef = useRef(null);
-const isSimulatingRef = useRef(false);
-const simulationTimeoutRef = useRef(null);
-const languageRef = useRef(language);
-const instanceIdRef = useRef(Math.random().toString(36).slice(2));
-const lastStartAtRef = useRef(null);
-const lastStopAtRef = useRef(null);
-const onResultRef = useRef(onResult);
-const onEndRef = useRef(onEnd);
-useEffect(() => {
-onResultRef.current = onResult;
-onEndRef.current = onEnd;
-}, [onResult, onEnd]);
-useEffect(() => {
-languageRef.current = language;
-if (recognitionRef.current) {
-console.log("[useSpeechRecognition] Updating language to:", language);
-recognitionRef.current.lang = language;
-}
-}, [language]);
-const isStartingRef = useRef(false);
-useEffect(() => {
-var _a;
-if (typeof window !== "undefined") {
-const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
-console.log("[useSpeechRecognition] Env - isSecureContext:", window.isSecureContext, "protocol:", (_a = window.location) == null ? void 0 : _a.protocol);
-const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
-console.log("[useSpeechRecognition] Init check - SpeechRecognition available:", !!SpeechRecognition, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
-setIsSupported(!!SpeechRecognition);
-}
-return () => {
-console.log("[useSpeechRecognition] Effect cleanup - stopping recognition");
-if (isSimulatingRef.current && simulationTimeoutRef.current) {
-clearTimeout(simulationTimeoutRef.current);
-simulationTimeoutRef.current = null;
-}
-if (recognitionRef.current) {
-try {
-recognitionRef.current.stop();
-} catch (e) {
-}
-recognitionRef.current = null;
-}
-if (typeof window !== "undefined") {
-const w = window;
-if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
-console.log("[useSpeechRecognition] Cleanup clearing global active instance lock. instanceId:", instanceIdRef.current);
-w.__llmSpeechRecognitionActiveInstanceId = null;
-}
-}
-};
-}, []);
-const createRecognitionInstance = useCallback(() => {
-const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
-if (!SpeechRecognition) {
-console.error("[useSpeechRecognition] SpeechRecognition not available");
-return null;
-}
-console.log("[useSpeechRecognition] Creating NEW recognition instance within user gesture context. Timestamp:", Date.now());
-const recognition = new SpeechRecognition();
-const isMobile = /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
-recognition.continuous = true;
-recognition.interimResults = true;
-recognition.lang = languageRef.current;
-console.log("[useSpeechRecognition] Instance created. continuous:", recognition.continuous, "interimResults:", recognition.interimResults, "lang:", recognition.lang, "isMobile:", isMobile, "instanceId:", instanceIdRef.current);
-recognition.onaudiostart = () => {
-console.log("[useSpeechRecognition] Native onaudiostart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onaudioend = () => {
-console.log("[useSpeechRecognition] Native onaudioend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onsoundstart = () => {
-console.log("[useSpeechRecognition] Native onsoundstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onsoundend = () => {
-console.log("[useSpeechRecognition] Native onsoundend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onspeechstart = () => {
-console.log("[useSpeechRecognition] Native onspeechstart. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onspeechend = () => {
-console.log("[useSpeechRecognition] Native onspeechend. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onnomatch = () => {
-console.log("[useSpeechRecognition] Native onnomatch. Timestamp:", Date.now(), "instanceId:", instanceIdRef.current);
-};
-recognition.onstart = () => {
-console.log("[useSpeechRecognition] Native onstart event fired. Timestamp:", Date.now());
-isStartingRef.current = false;
-setIsListening(true);
-setError(null);
-if (typeof window !== "undefined") {
-const w = window;
-w.__llmSpeechRecognitionActiveInstanceId = instanceIdRef.current;
-console.log("[useSpeechRecognition] Set global active instance lock. instanceId:", instanceIdRef.current);
-}
-};
-recognition.onend = () => {
-console.log("[useSpeechRecognition] Native onend event fired. Timestamp:", Date.now());
-isStartingRef.current = false;
-if (recognitionRef.current === recognition) {
-console.log("[useSpeechRecognition] Nullifying recognitionRef (onend)");
-recognitionRef.current = null;
-}
-if (isSimulatingRef.current) {
-console.log("[useSpeechRecognition] onend ignored - simulating");
-return;
-}
-setIsListening(false);
-if (onEndRef.current) onEndRef.current();
-if (typeof window !== "undefined") {
-const w = window;
-if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
-w.__llmSpeechRecognitionActiveInstanceId = null;
-console.log("[useSpeechRecognition] Cleared global active instance lock. instanceId:", instanceIdRef.current);
-}
-}
-};
-recognition.onresult = (event) => {
-console.log("[useSpeechRecognition] onresult event. results count:", event.results.length);
-let interimTranscript = "";
-let finalTranscript = "";
-for (let i = event.results.length - 1; i < event.results.length; ++i) {
-const result = event.results[i];
-if (result.isFinal) {
-finalTranscript += result[0].transcript;
-console.log("[useSpeechRecognition] Final transcript:", finalTranscript);
-if (onResultRef.current) onResultRef.current(finalTranscript, true);
-} else {
-interimTranscript += result[0].transcript;
-console.log("[useSpeechRecognition] Interim transcript:", interimTranscript);
-if (onResultRef.current) onResultRef.current(interimTranscript, false);
-}
-}
-setTranscript((prev) => prev + finalTranscript);
-};
-recognition.onerror = (event) => {
-console.error("[useSpeechRecognition] Native onerror event:", event.error, "Timestamp:", Date.now());
-console.error("[useSpeechRecognition] Error context - lastStartAt:", lastStartAtRef.current, "lastStopAt:", lastStopAtRef.current, "instanceId:", instanceIdRef.current);
-console.error("[useSpeechRecognition] Error details - This could be caused by:");
-if (event.error === "aborted") {
-console.error("[useSpeechRecognition] - aborted: Recognition was aborted. Common causes: keyboard appeared, focus changed, another recognition started, or page navigation");
-} else if (event.error === "not-allowed") {
-console.error("[useSpeechRecognition] - not-allowed: Microphone permission denied");
-} else if (event.error === "no-speech") {
-console.error("[useSpeechRecognition] - no-speech: No speech detected");
-} else if (event.error === "network") {
-console.error("[useSpeechRecognition] - network: Network error during recognition");
-}
-if (recognitionRef.current === recognition) {
-console.log("[useSpeechRecognition] Nullifying recognitionRef (onerror)");
-recognitionRef.current = null;
-}
-isStartingRef.current = false;
-if (event.error === "not-allowed" && process.env.NODE_ENV === "development") {
-console.warn("Speech recognition blocked. Simulating input for development...");
-isSimulatingRef.current = true;
-setError(null);
-setIsListening(true);
-simulationTimeoutRef.current = setTimeout(() => {
-const mockText = "This is a simulated voice input for testing.";
-setTranscript((prev) => prev + (prev ? " " : "") + mockText);
-if (onResultRef.current) onResultRef.current(mockText, true);
-isSimulatingRef.current = false;
-setIsListening(false);
-if (onEndRef.current) onEndRef.current();
-simulationTimeoutRef.current = null;
-}, 3e3);
-return;
-}
-console.error("Speech recognition error", event.error);
-setError(event.error);
-setIsListening(false);
-if (typeof window !== "undefined") {
-const w = window;
-if (w.__llmSpeechRecognitionActiveInstanceId === instanceIdRef.current) {
-w.__llmSpeechRecognitionActiveInstanceId = null;
-console.log("[useSpeechRecognition] Cleared global active instance lock after error. instanceId:", instanceIdRef.current);
-}
-}
-};
-return recognition;
-}, []);
-const start = useCallback(() => {
-var _a;
-const startTimestamp = Date.now();
-console.log("[useSpeechRecognition] start() called. Timestamp:", startTimestamp);
-console.log("[useSpeechRecognition] State check - isListening:", isListening, "isStarting:", isStartingRef.current, "hasExistingInstance:", !!recognitionRef.current);
-if (typeof document !== "undefined") {
-console.log("[useSpeechRecognition] Document hasFocus:", document.hasFocus(), "activeElement:", (_a = document.activeElement) == null ? void 0 : _a.tagName);
-}
-if (isSimulatingRef.current) {
-console.log("[useSpeechRecognition] isSimulating, ignoring start");
-return;
-}
-if (isStartingRef.current) {
-console.warn("[useSpeechRecognition] Already starting - ignoring duplicate call");
-return;
-}
-if (isListening) {
-console.warn("[useSpeechRecognition] App state says already listening - ignoring");
-return;
-}
-if (typeof window !== "undefined") {
-const w = window;
-if (w.__llmSpeechRecognitionActiveInstanceId && w.__llmSpeechRecognitionActiveInstanceId !== instanceIdRef.current) {
-console.error("[useSpeechRecognition] Another recognition instance appears active. activeInstanceId:", w.__llmSpeechRecognitionActiveInstanceId, "thisInstanceId:", instanceIdRef.current);
-}
-}
-try {
-if (recognitionRef.current) {
-console.log("[useSpeechRecognition] Stopping existing instance before creating new one");
-try {
-recognitionRef.current.onend = null;
-recognitionRef.current.onerror = null;
-recognitionRef.current.stop();
-} catch (e) {
-}
-recognitionRef.current = null;
-}
-const recognition = createRecognitionInstance();
-if (!recognition) {
-console.error("[useSpeechRecognition] Failed to create recognition instance");
-setError("Speech recognition not available");
-return;
-}
-recognitionRef.current = recognition;
-setTranscript("");
-isStartingRef.current = true;
-lastStartAtRef.current = Date.now();
-console.log("[useSpeechRecognition] About to call recognition.start(). Timestamp:", Date.now());
-recognitionRef.current.start();
-console.log("[useSpeechRecognition] recognition.start() executed successfully. Timestamp:", Date.now());
-} catch (error2) {
-isStartingRef.current = false;
-console.error("[useSpeechRecognition] Failed to start recognition:", (error2 == null ? void 0 : error2.message) || error2);
-if ((error2 == null ? void 0 : error2.name) === "InvalidStateError") {
-console.error("[useSpeechRecognition] InvalidStateError - recognition may already be running");
-}
-setError((error2 == null ? void 0 : error2.message) || "Failed to start speech recognition");
-}
-}, [isListening, createRecognitionInstance]);
-const stop = useCallback(() => {
-console.log("[useSpeechRecognition] stop() called");
-lastStopAtRef.current = Date.now();
-if (isSimulatingRef.current) {
-if (simulationTimeoutRef.current) {
-clearTimeout(simulationTimeoutRef.current);
-simulationTimeoutRef.current = null;
-}
-const mockText = "This is a simulated voice input for testing.";
-setTranscript((prev) => prev + (prev ? " " : "") + mockText);
-if (onResultRef.current) onResultRef.current(mockText, true);
-isSimulatingRef.current = false;
-setIsListening(false);
-if (onEndRef.current) onEndRef.current();
-return;
-}
-if (recognitionRef.current) {
-recognitionRef.current.stop();
-console.log("[useSpeechRecognition] recognition.stop() executed");
-}
-}, []);
-const resetTranscript = useCallback(() => {
-setTranscript("");
-}, []);
-return {
-isListening,
-transcript,
-start,
-stop,
-resetTranscript,
-isSupported,
-error
-};
-};
-
 // src/ui/react/hooks/useAudioRecorder.ts
-import { useState
+import { useState, useRef, useCallback } from "react";
 var useAudioRecorder = (onStop) => {
-const [isRecording, setIsRecording] =
-const [isSimulated, setIsSimulated] =
-const [blob, setBlob] =
-const [error, setError] =
-const mediaRecorderRef =
-const chunksRef =
-const start =
+const [isRecording, setIsRecording] = useState(false);
+const [isSimulated, setIsSimulated] = useState(false);
+const [blob, setBlob] = useState(null);
+const [error, setError] = useState(null);
+const mediaRecorderRef = useRef(null);
+const chunksRef = useRef([]);
+const start = useCallback(async () => {
 try {
 if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
 if (process.env.NODE_ENV === "development") {
@@ -456,7 +172,12 @@ var useAudioRecorder = (onStop) => {
 }
 throw new Error("Media devices not available. Ensure you are using HTTPS or localhost.");
 }
-const stream = await navigator.mediaDevices.getUserMedia({
+const stream = await navigator.mediaDevices.getUserMedia({
+audio: {
+noiseSuppression: true,
+echoCancellation: true
+}
+});
 const mediaRecorder = new MediaRecorder(stream);
 mediaRecorderRef.current = mediaRecorder;
 chunksRef.current = [];
@@ -482,7 +203,7 @@ var useAudioRecorder = (onStop) => {
 setError(e.message || "Microphone access denied");
 }
 }, [onStop]);
-const stop =
+const stop = useCallback(() => {
 if (isSimulated) {
 setIsRecording(false);
 setIsSimulated(false);
@@ -506,9 +227,9 @@ var useAudioRecorder = (onStop) => {
 };
 
 // src/ui/react/hooks/useProactiveResize.ts
-import { useEffect
+import { useEffect } from "react";
 function useProactiveResize(textareaRef, measurementRef, value, disabled) {
-
+useEffect(() => {
 if (!textareaRef.current || !measurementRef.current) return;
 const styles = window.getComputedStyle(textareaRef.current);
 measurementRef.current.style.width = styles.width;
@@ -544,22 +265,18 @@ var ChatInputArea = forwardRef(({
 defaultInputMode = "text"
 }, ref) => {
 var _a, _b, _c, _d;
-const [internalMessage, setInternalMessage] =
-const [voiceTrigger, setVoiceTrigger] =
-const [isTranscribing, setIsTranscribing] =
-const [voiceError, setVoiceError] =
-const [isFocused, setIsFocused] =
-const [showDebug, setShowDebug] =
-const [logs, setLogs] =
-const tapCountRef =
-
+const [internalMessage, setInternalMessage] = useState2("");
+const [voiceTrigger, setVoiceTrigger] = useState2(null);
+const [isTranscribing, setIsTranscribing] = useState2(false);
+const [voiceError, setVoiceError] = useState2(null);
+const [isFocused, setIsFocused] = useState2(false);
+const [showDebug, setShowDebug] = useState2(false);
+const [logs, setLogs] = useState2([]);
+const tapCountRef = useRef2({ count: 0, lastTap: 0 });
+useEffect2(() => {
 const originalLog = console.log;
 const originalWarn = console.warn;
 const originalError = console.error;
-const formatTime = () => {
-const now = /* @__PURE__ */ new Date();
-return `${now.getHours().toString().padStart(2, "0")}:${now.getMinutes().toString().padStart(2, "0")}:${now.getSeconds().toString().padStart(2, "0")}.${now.getMilliseconds().toString().padStart(3, "0")}`;
-};
 const addLog = (type, args) => {
 try {
 const msg = args.map((arg) => {
@@ -567,7 +284,7 @@ var ChatInputArea = forwardRef(({
 if (typeof arg === "object") return JSON.stringify(arg);
 return String(arg);
 }).join(" ");
-setLogs((prev) => [
+setLogs((prev) => [`[${type}] ${msg}`, ...prev].slice(0, 50));
 } catch (e) {
 }
 };
@@ -589,15 +306,15 @@ var ChatInputArea = forwardRef(({
 console.error = originalError;
 };
 }, []);
-const copyLogs =
+const copyLogs = useCallback2(() => {
 navigator.clipboard.writeText(logs.join("\n")).then(() => alert("Logs copied to clipboard")).catch((err) => console.error("Failed to copy logs", err));
 }, [logs]);
-const textareaRef =
-const measurementRef =
-const pendingSelectionRef =
+const textareaRef = useRef2(null);
+const measurementRef = useRef2(null);
+const pendingSelectionRef = useRef2(null);
 const isControlled = value !== void 0;
 const message = isControlled ? value : internalMessage;
-const messageRef =
+const messageRef = useRef2(message);
 messageRef.current = message;
 useLayoutEffect(() => {
 if (pendingSelectionRef.current && textareaRef.current) {
@@ -607,18 +324,18 @@ var ChatInputArea = forwardRef(({
 pendingSelectionRef.current = null;
 }
 }, [message]);
-const onChangeRef =
-
+const onChangeRef = useRef2(onChange);
+useEffect2(() => {
 onChangeRef.current = onChange;
 }, [onChange]);
 const { voice: globalVoice } = useChatConfig();
 const isVoiceEnabled = (_a = globalVoice == null ? void 0 : globalVoice.enabled) != null ? _a : !!propVoiceConfig;
 const voiceConfig = isVoiceEnabled ? propVoiceConfig || (globalVoice == null ? void 0 : globalVoice.config) : void 0;
-const voiceConfigRef =
-
+const voiceConfigRef = useRef2(voiceConfig);
+useEffect2(() => {
 voiceConfigRef.current = voiceConfig;
 }, [voiceConfig]);
-const triggerChange =
+const triggerChange = useCallback2((newValue) => {
 setVoiceError(null);
 if (isControlled && onChangeRef.current) {
 const syntheticEvent = {
@@ -632,7 +349,7 @@ var ChatInputArea = forwardRef(({
 }, [isControlled]);
 const isInputDisabled = (currentTask == null ? void 0 : currentTask.complete) || (lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactive) && (((_b = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _b.function) === "form" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted) || ((_c = lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.interactiveData) == null ? void 0 : _c.function) === "confirm" && !(lastInteractiveMessage == null ? void 0 : lastInteractiveMessage.isResponseSubmitted));
 useProactiveResize(textareaRef, measurementRef, message, isInputDisabled || !!voiceTrigger);
-const insertTextAtCursor =
+const insertTextAtCursor = useCallback2((text) => {
 const textarea = textareaRef.current;
 const currentVal = messageRef.current || "";
 if (!textarea) {
@@ -650,23 +367,16 @@ var ChatInputArea = forwardRef(({
 pendingSelectionRef.current = { start: selectionStart, end: selectionEnd };
 triggerChange(newText);
 }, [triggerChange]);
-const handleVoiceResult =
+const handleVoiceResult = useCallback2((text, isFinal) => {
 if (isFinal) {
 insertTextAtCursor(text);
 }
 }, [insertTextAtCursor]);
-const handleVoiceEnd =
+const handleVoiceEnd = useCallback2(() => {
 var _a2, _b2;
 setVoiceTrigger(null);
 (_b2 = (_a2 = voiceConfigRef.current) == null ? void 0 : _a2.onVoiceEnd) == null ? void 0 : _b2.call(_a2);
 }, []);
-const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
-useEffect3(() => {
-if (nativeSpeech.error) {
-setVoiceError(nativeSpeech.error);
-console.error("[ChatInputArea] Native Speech Error:", nativeSpeech.error);
-}
-}, [nativeSpeech.error]);
 const customRecorder = useAudioRecorder(async (blob) => {
 var _a2, _b2, _c2;
 setVoiceTrigger(null);
@@ -726,65 +436,27 @@ var ChatInputArea = forwardRef(({
 handleSubmit();
 }
 };
-const isMobile = useCallback3(() => {
-if (typeof window === "undefined") return false;
-return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
-}, []);
 const startRecording = async (trigger) => {
-var _a2
-
-console.log("[ChatInputArea] Current state - voiceTrigger:", voiceTrigger, "isTranscribing:", isTranscribing);
-if (voiceTrigger || isTranscribing) {
-console.log("[ChatInputArea] startRecording ignored - already active");
-return;
-}
+var _a2;
+if (voiceTrigger || isTranscribing) return;
 setVoiceTrigger(trigger);
 setVoiceError(null);
-
-
-if (!nativeSpeech.isSupported) {
-console.error("[ChatInputArea] Native speech not supported");
-alert("Speech recognition is not supported in this browser.");
-setVoiceTrigger(null);
-return;
-}
-console.log("[ChatInputArea] Calling nativeSpeech.start()...");
-nativeSpeech.start();
-console.log("[ChatInputArea] nativeSpeech.start() called");
-console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists...");
-try {
-(_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
-console.log("[ChatInputArea] voiceConfig.onVoiceStart completed");
-} catch (e) {
-console.error("[ChatInputArea] voiceConfig.onVoiceStart threw error", e);
-}
-} else {
-console.log("[ChatInputArea] Using custom recorder");
-console.log("[ChatInputArea] Calling voiceConfig.onVoiceStart if exists (custom mode)...");
-try {
-(_b2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _b2.call(voiceConfig);
-console.log("[ChatInputArea] voiceConfig.onVoiceStart completed");
-} catch (e) {
-console.error("[ChatInputArea] voiceConfig.onVoiceStart threw error", e);
-}
+(_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _a2.call(voiceConfig);
+try {
 await customRecorder.start();
-
-
-
-
-setTimeout(() => {
-var _a3;
-return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
-}, 0);
+} catch (e) {
+console.error("[ChatInputArea] Failed to start recorder:", e);
+setVoiceError("Mic access denied");
+setVoiceTrigger(null);
 }
+setTimeout(() => {
+var _a3;
+return (_a3 = textareaRef.current) == null ? void 0 : _a3.focus();
+}, 0);
 };
 const stopRecording = () => {
 if (!voiceTrigger) return;
-
-nativeSpeech.stop();
-} else {
-customRecorder.stop();
-}
+customRecorder.stop();
 };
 const getPlaceholder = () => {
 if (placeholder) return placeholder;
@@ -963,7 +635,7 @@ var ChatInputArea = forwardRef(({
 ChatInputArea.displayName = "ChatInputArea";
 
 // src/ui/react/components/TapToTalk.tsx
-import React3, { useState as
+import React3, { useState as useState3, useCallback as useCallback3, useRef as useRef3 } from "react";
 import { MicrophoneIcon as MicrophoneIcon2, XMarkIcon as XMarkIcon2, Square2StackIcon as Square2StackIcon2 } from "@heroicons/react/24/outline";
 import { jsx as jsx6, jsxs as jsxs4 } from "react/jsx-runtime";
 var TapToTalk = ({
@@ -978,19 +650,16 @@ var TapToTalk = ({
 var _a;
 const globalConfig = useChatConfig();
 const voiceConfig = propVoiceConfig || ((_a = globalConfig.voice) == null ? void 0 : _a.config);
-const [isTranscribing, setIsTranscribing] =
-const [
-const [
-const [
-const
+const [isTranscribing, setIsTranscribing] = useState3(false);
+const [voiceTrigger, setVoiceTrigger] = useState3(null);
+const [errorMsg, setErrorMsg] = useState3(null);
+const [showDebug, setShowDebug] = useState3(false);
+const [logs, setLogs] = useState3([]);
+const tapCountRef = useRef3({ count: 0, lastTap: 0 });
 React3.useEffect(() => {
 const originalLog = console.log;
 const originalWarn = console.warn;
 const originalError = console.error;
-const formatTime = () => {
-const now = /* @__PURE__ */ new Date();
-return `${now.getHours().toString().padStart(2, "0")}:${now.getMinutes().toString().padStart(2, "0")}:${now.getSeconds().toString().padStart(2, "0")}.${now.getMilliseconds().toString().padStart(3, "0")}`;
-};
 const addLog = (type, args) => {
 try {
 const msg = args.map((arg) => {
@@ -998,7 +667,7 @@ var TapToTalk = ({
 if (typeof arg === "object") return JSON.stringify(arg);
 return String(arg);
 }).join(" ");
-setLogs((prev) => [
+setLogs((prev) => [`[${type}] ${msg}`, ...prev].slice(0, 50));
 } catch (e) {
 }
 };
@@ -1020,27 +689,15 @@ var TapToTalk = ({
 console.error = originalError;
 };
 }, []);
-const copyLogs =
+const copyLogs = useCallback3(() => {
 navigator.clipboard.writeText(logs.join("\n")).then(() => alert("Logs copied to clipboard")).catch((err) => console.error("Failed to copy logs", err));
 }, [logs]);
-const
-
-if (isFinal) {
-onResult(text);
-setErrorMsg(null);
-}
-}, [onResult]);
-const handleVoiceEnd = useCallback4(() => {
-console.log("[TapToTalk] Native speech ended");
+const handleVoiceEnd = useCallback3(() => {
+setVoiceTrigger(null);
 }, []);
-const nativeSpeech = useSpeechRecognition(handleVoiceResult, handleVoiceEnd, voiceConfig == null ? void 0 : voiceConfig.language);
-React3.useEffect(() => {
-if (nativeSpeech.error) {
-setErrorMsg(nativeSpeech.error);
-console.error("[TapToTalk] Native Speech Error:", nativeSpeech.error);
-}
-}, [nativeSpeech.error]);
 const customRecorder = useAudioRecorder(async (blob) => {
+var _a2;
+setVoiceTrigger(null);
 setIsTranscribing(true);
 setErrorMsg(null);
 if (blob.type === "audio/simulated") {
@@ -1050,7 +707,7 @@ var TapToTalk = ({
 setIsTranscribing(false);
 return;
 }
-if (
+if (voiceConfig == null ? void 0 : voiceConfig.onAudioCapture) {
 try {
 const text = await voiceConfig.onAudioCapture(blob);
 if (text) onResult(text);
@@ -1063,77 +720,59 @@ var TapToTalk = ({
 } else {
 setIsTranscribing(false);
 }
+(_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
 });
-const isListening =
+const isListening = !!voiceTrigger || customRecorder.isRecording;
 const isActive = isListening || isTranscribing;
-const processingRef =
-const
-
-return /Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent) || "ontouchstart" in window || navigator.maxTouchPoints > 0;
-}, []);
-const toggleVoice = async (e) => {
-const now = Date.now();
-if (now - tapCountRef.current.lastTap < 500) {
-tapCountRef.current.count++;
-} else {
-tapCountRef.current.count = 1;
-}
-tapCountRef.current.lastTap = now;
-if (tapCountRef.current.count >= 5) {
-setShowDebug((prev) => !prev);
-tapCountRef.current.count = 0;
-if (isActive) {
-console.log("[TapToTalk] Debug trigger force-stop");
-if ((voiceConfig == null ? void 0 : voiceConfig.mode) === "native") nativeSpeech.stop();
-else customRecorder.stop();
-}
-return;
-}
-console.log("[TapToTalk] toggleVoice called. isMobile:", isMobile());
+const processingRef = useRef3(false);
+const toggleVoice = async () => {
+var _a2, _b, _c;
 if (processingRef.current) {
 console.log("[TapToTalk] toggleVoice ignored - processing");
 return;
 }
 processingRef.current = true;
-console.log("[TapToTalk] toggleVoice called. isActive:", isActive
+console.log("[TapToTalk] toggleVoice called. isActive:", isActive);
 try {
+const now = Date.now();
+if (now - tapCountRef.current.lastTap < 500) {
+tapCountRef.current.count++;
+} else {
+tapCountRef.current.count = 1;
+}
+tapCountRef.current.lastTap = now;
+if (tapCountRef.current.count >= 5) {
+setShowDebug((prev) => !prev);
+tapCountRef.current.count = 0;
+if (isActive) {
+console.log("[TapToTalk] Debug trigger force-stop");
+customRecorder.stop();
+setVoiceTrigger(null);
+}
+return;
+}
 if (isActive) {
 if (isTranscribing && !isListening) {
 console.log("[TapToTalk] Ignoring click during transcription");
 return;
 }
 console.log("[TapToTalk] Stopping voice...");
-
-
-
-customRecorder.stop();
-}
+customRecorder.stop();
+setVoiceTrigger(null);
+(_a2 = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _a2.call(voiceConfig);
 } else {
-console.log("[TapToTalk] Starting voice...
+console.log("[TapToTalk] Starting voice...");
 setErrorMsg(null);
-
-
-
-
-
-
-
-
-
-
-console.error("[TapToTalk] Custom recorder failed:", e2);
-setErrorMsg("Mic access denied");
-}
-} else {
-console.log("[TapToTalk] Starting native speech recognition...");
-if (!nativeSpeech.isSupported) {
-console.error("[TapToTalk] Native speech not supported");
-setErrorMsg("Speech not supported");
-return;
-}
-console.log("[TapToTalk] Calling nativeSpeech.start()...");
-nativeSpeech.start();
-console.log("[TapToTalk] nativeSpeech.start() called");
+onFocusTarget == null ? void 0 : onFocusTarget();
+setVoiceTrigger("click");
+(_b = voiceConfig == null ? void 0 : voiceConfig.onVoiceStart) == null ? void 0 : _b.call(voiceConfig);
+try {
+await customRecorder.start();
+} catch (e) {
+console.error("[TapToTalk] Custom recorder failed:", e);
+setErrorMsg("Mic access denied");
+setVoiceTrigger(null);
+(_c = voiceConfig == null ? void 0 : voiceConfig.onVoiceEnd) == null ? void 0 : _c.call(voiceConfig);
 }
 }
 } finally {
@@ -1179,9 +818,7 @@ var TapToTalk = ({
 /* @__PURE__ */ jsxs4(
 "button",
 {
-type: "button",
 onClick: toggleVoice,
-style: { touchAction: "manipulation" },
 disabled: disabled || isTranscribing && !isListening,
 className: `flex items-center justify-center gap-3 px-6 py-3 rounded-xl transition-all duration-300 w-full font-medium shadow-md active:scale-[0.98]
 ${bgColor} text-white
@@ -1189,9 +826,9 @@ var TapToTalk = ({
 ${className}`,
 title: label,
 children: [
-/* @__PURE__ */ jsx6("div", { className: "flex items-center justify-center shrink-0
-/* @__PURE__ */ jsx6("span", { className: "truncate
-errorMsg && /* @__PURE__ */ jsx6("span", { className: "text-[10px] bg-white/20 px-1.5 py-0.5 rounded text-red-100 animate-in fade-in slide-in-from-right-1
+/* @__PURE__ */ jsx6("div", { className: "flex items-center justify-center shrink-0", children: Icon }),
+/* @__PURE__ */ jsx6("span", { className: "truncate", children: label }),
+errorMsg && /* @__PURE__ */ jsx6("span", { className: "text-[10px] bg-white/20 px-1.5 py-0.5 rounded text-red-100 animate-in fade-in slide-in-from-right-1", children: errorMsg })
 ]
 }
 )
@@ -1199,17 +836,17 @@
 };
 
 // src/ui/react/components/ChatMessageList.tsx
-import { useEffect as
+import { useEffect as useEffect5, useRef as useRef5 } from "react";
 
 // src/ui/react/components/interactive/ConfirmInteraction.tsx
-import { useState as
+import { useState as useState4 } from "react";
 import { jsx as jsx7, jsxs as jsxs5 } from "react/jsx-runtime";
 var ConfirmInteraction = ({
 parameters,
 onResponse,
 isResponseSubmitted
 }) => {
-const [selectedOption, setSelectedOption] =
+const [selectedOption, setSelectedOption] = useState4(null);
 const params = parameters;
 const { yesPrompt, noPrompt } = params;
 console.log("[ConfirmInteraction] Parameters:", params);
@@ -1242,7 +879,7 @@ var ConfirmInteraction = ({
 var ConfirmInteraction_default = ConfirmInteraction;
 
 // src/ui/react/components/interactive/SelectInteraction.tsx
-import { useState as
+import { useState as useState5, useEffect as useEffect3 } from "react";
 import { jsx as jsx8, jsxs as jsxs6 } from "react/jsx-runtime";
 var SelectInteraction = ({
 parameters,
@@ -1250,11 +887,11 @@ var SelectInteraction = ({
 isResponseSubmitted,
 message
 }) => {
-const [selectedOption, setSelectedOption] =
-const [customOption, setCustomOption] =
+const [selectedOption, setSelectedOption] = useState5(null);
+const [customOption, setCustomOption] = useState5(null);
 const params = parameters;
 const { question, options, placeholder } = params;
-
+useEffect3(() => {
 if (isResponseSubmitted && (message == null ? void 0 : message.responseValue)) {
 const responseValueStr = String(message.responseValue);
 setSelectedOption(responseValueStr);
@@ -1293,7 +930,7 @@ var SelectInteraction = ({
 var SelectInteraction_default = SelectInteraction;
 
 // src/ui/react/components/interactive/FormInteraction.tsx
-import { useState as
+import { useState as useState6, useEffect as useEffect4, useRef as useRef4 } from "react";
 import { ChevronDownIcon, ChevronUpIcon } from "lucide-react";
 import { jsx as jsx9, jsxs as jsxs7 } from "react/jsx-runtime";
 var FormInteraction = ({
@@ -1302,11 +939,11 @@ var FormInteraction = ({
 isResponseSubmitted,
 submittedValues
 }) => {
-const [isModalOpen, setIsModalOpen] =
-const [formValues, setFormValues] =
-const [isExpanded, setIsExpanded] =
-const [parsedFields, setParsedFields] =
-const formButtonsRef =
+const [isModalOpen, setIsModalOpen] = useState6(false);
+const [formValues, setFormValues] = useState6({});
+const [isExpanded, setIsExpanded] = useState6(false);
+const [parsedFields, setParsedFields] = useState6([]);
+const formButtonsRef = useRef4(null);
 const parseParameters = () => {
 const { prompt, description, submitText = "Submit", cancelText = "Cancel" } = parameters;
 let fieldsArray = [];
@@ -1367,7 +1004,7 @@ var FormInteraction = ({
 };
 };
 const params = parseParameters();
-
+useEffect4(() => {
 const processedParams = parseParameters();
 setParsedFields(processedParams.fields);
 const initialValues = {};
@@ -1385,7 +1022,7 @@
 setIsModalOpen(true);
 }
 }, [parameters, isResponseSubmitted]);
-
+useEffect4(() => {
 if (isModalOpen && formButtonsRef.current) {
 setTimeout(() => {
 var _a;
@@ -1693,10 +1330,10 @@ var ChatMessageList = ({
 getContextExample,
 onInteractiveResponse
 }) => {
-const chatContainerRef =
-const lastMessageRef =
-const processingIndicatorRef =
-
+const chatContainerRef = useRef5(null);
+const lastMessageRef = useRef5(null);
+const processingIndicatorRef = useRef5(null);
+useEffect5(() => {
 if (isProcessing && processingIndicatorRef.current) {
 processingIndicatorRef.current.scrollIntoView({ behavior: "smooth" });
 return;
@@ -1940,7 +1577,6 @@ export {
 transcribeAudio,
 useAudioRecorder,
 useChatConfig,
-useProactiveResize
-useSpeechRecognition
+useProactiveResize
 };
 //# sourceMappingURL=index.js.map