@usecrow/ui 0.1.57 → 0.1.59

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
package/dist/index.js CHANGED
@@ -911,18 +911,21 @@ function useConversations({ productId, apiUrl = "" }) {
   const [isLoadingHistory, setIsLoadingHistory] = useState(false);
   const loadConversations = useCallback(async () => {
     const token = window.__crow_identity_token;
-    if (!token) return;
+    if (!token) return [];
     try {
       const res = await fetch(
         `${apiUrl}/api/chat/conversations?product_id=${productId}&identity_token=${encodeURIComponent(token)}`
       );
       if (res.ok) {
         const data = await res.json();
-        setConversations(data.conversations || []);
+        const convs = data.conversations || [];
+        setConversations(convs);
+        return convs;
       }
     } catch (error) {
       console.error("[Crow] Failed to load conversations:", error);
     }
+    return [];
   }, [apiUrl, productId]);
   const loadConversationHistory = useCallback(
     async (conversationId) => {
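
Note: `loadConversations` now always resolves to an array (the fetched list, or `[]` when there is no identity token or the request fails) instead of `undefined`, so callers can await the result directly rather than watching the `conversations` state. A minimal sketch of the new contract (the caller wiring is illustrative, not part of the diff):

    const convs = await loadConversations();
    // [] when unauthenticated or on error; never undefined
    console.log(`loaded ${convs.length} conversation(s)`);
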
@@ -1743,6 +1746,186 @@ function usePreviewCopilotStyles(previewStyles) {
     styles: mergeCopilotStyles(void 0, previewStyles)
   };
 }
+function useTTSOutput({
+  backendUrl,
+  voiceId = "YTpq7expH9539ERJ"
+}) {
+  const [isSpeaking, setIsSpeaking] = useState(false);
+  const [error, setError] = useState(null);
+  const wsRef = useRef(null);
+  const audioContextRef = useRef(null);
+  const nextTimeRef = useRef(0);
+  const streamCompleteRef = useRef(false);
+  const completionCheckIntervalRef = useRef(null);
+  const cleanupAudioContext = useCallback(() => {
+    setIsSpeaking(false);
+    if (audioContextRef.current && audioContextRef.current.state !== "closed") {
+      audioContextRef.current.close();
+      audioContextRef.current = null;
+    }
+    if (completionCheckIntervalRef.current) {
+      clearInterval(completionCheckIntervalRef.current);
+      completionCheckIntervalRef.current = null;
+    }
+  }, []);
+  const closeWebSocket = useCallback(() => {
+    if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
+      try {
+        wsRef.current.send(JSON.stringify({ type: "stop" }));
+        wsRef.current.close();
+      } catch (e) {
+      }
+    }
+    wsRef.current = null;
+  }, []);
+  const cleanupTTS = useCallback(() => {
+    setIsSpeaking(false);
+    setError(null);
+    closeWebSocket();
+    cleanupAudioContext();
+  }, [closeWebSocket, cleanupAudioContext]);
+  const waitForAudioComplete = useCallback(() => {
+    if (completionCheckIntervalRef.current) {
+      clearInterval(completionCheckIntervalRef.current);
+    }
+    completionCheckIntervalRef.current = setInterval(() => {
+      if (!audioContextRef.current) {
+        if (completionCheckIntervalRef.current) {
+          clearInterval(completionCheckIntervalRef.current);
+          completionCheckIntervalRef.current = null;
+        }
+        return;
+      }
+      const now = audioContextRef.current.currentTime;
+      if (now >= nextTimeRef.current) {
+        if (completionCheckIntervalRef.current) {
+          clearInterval(completionCheckIntervalRef.current);
+          completionCheckIntervalRef.current = null;
+        }
+        cleanupAudioContext();
+      }
+    }, 100);
+  }, [cleanupAudioContext]);
+  const playAudioChunk = useCallback((base64Audio) => {
+    if (!audioContextRef.current || audioContextRef.current.state === "closed") {
+      console.error("TTS: AudioContext not available");
+      return;
+    }
+    try {
+      const binary = atob(base64Audio);
+      const bytes = new Uint8Array(binary.length);
+      for (let i = 0; i < binary.length; i++) {
+        bytes[i] = binary.charCodeAt(i);
+      }
+      const pcm16 = new Int16Array(bytes.buffer);
+      const float32 = new Float32Array(pcm16.length);
+      for (let i = 0; i < pcm16.length; i++) {
+        float32[i] = pcm16[i] / 32768;
+      }
+      const buffer = audioContextRef.current.createBuffer(1, float32.length, 48e3);
+      buffer.getChannelData(0).set(float32);
+      const source = audioContextRef.current.createBufferSource();
+      source.buffer = buffer;
+      source.connect(audioContextRef.current.destination);
+      const now = audioContextRef.current.currentTime;
+      if (nextTimeRef.current < now) {
+        nextTimeRef.current = now;
+      }
+      source.start(nextTimeRef.current);
+      nextTimeRef.current += buffer.duration;
+    } catch (err) {
+      console.error("TTS: Error playing audio chunk:", err);
+      setError(err instanceof Error ? err.message : "Failed to play audio chunk");
+    }
+  }, []);
+  const speak = useCallback(
+    (text) => {
+      console.log("[TTS Hook] speak called with:", text.substring(0, 50), "backendUrl:", backendUrl);
+      if (!text.trim()) {
+        console.log("[TTS Hook] No text to speak");
+        setError("No text to speak");
+        return;
+      }
+      if (isSpeaking || wsRef.current) {
+        console.log("[TTS Hook] Already playing");
+        setError("Already playing, stop first");
+        return;
+      }
+      setError(null);
+      nextTimeRef.current = 0;
+      streamCompleteRef.current = false;
+      try {
+        audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)({
+          sampleRate: 48e3
+        });
+        const url = backendUrl.startsWith("http") ? backendUrl.replace(/^http/, "ws") : backendUrl;
+        const wsUrl = `${url}/api/tts/stream`;
+        console.log("[TTS Hook] Connecting to:", wsUrl);
+        const ws = new WebSocket(wsUrl);
+        wsRef.current = ws;
+        ws.onopen = () => {
+          ws.send(
+            JSON.stringify({
+              type: "setup",
+              voice_id: voiceId,
+              output_format: "pcm"
+            })
+          );
+        };
+        ws.onmessage = (event) => {
+          const msg = JSON.parse(event.data);
+          if (msg.type === "ready") {
+            ws.send(JSON.stringify({ type: "text", text }));
+            ws.send(JSON.stringify({ type: "end_of_stream" }));
+          } else if (msg.type === "audio") {
+            playAudioChunk(msg.audio);
+          } else if (msg.type === "done") {
+            streamCompleteRef.current = true;
+            closeWebSocket();
+            waitForAudioComplete();
+          } else if (msg.type === "error") {
+            setError(msg.message || "TTS error");
+            cleanupTTS();
+          }
+        };
+        ws.onerror = () => {
+          setError("WebSocket error");
+          cleanupTTS();
+        };
+        ws.onclose = () => {
+          wsRef.current = null;
+        };
+        setIsSpeaking(true);
+      } catch (err) {
+        setError(err instanceof Error ? err.message : "Failed to start TTS");
+        cleanupTTS();
+      }
+    },
+    [
+      isSpeaking,
+      backendUrl,
+      voiceId,
+      playAudioChunk,
+      closeWebSocket,
+      waitForAudioComplete,
+      cleanupTTS
+    ]
+  );
+  const stop = useCallback(() => {
+    cleanupTTS();
+  }, [cleanupTTS]);
+  useEffect(() => {
+    return () => {
+      cleanupTTS();
+    };
+  }, [cleanupTTS]);
+  return {
+    speak,
+    stop,
+    isSpeaking,
+    error
+  };
+}
 var WidgetStyleContext = createContext(null);
 function WidgetStyleProvider({
   children,
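
Note: the new `useTTSOutput` hook streams synthesized speech from the backend over a WebSocket at `/api/tts/stream`: it performs a `setup` / `text` / `end_of_stream` handshake, receives base64-encoded 48 kHz mono PCM chunks, and schedules each chunk to start exactly where the previous one ends (tracked in `nextTimeRef`), so playback is gapless. A hedged usage sketch (the wrapper hook below is hypothetical, not part of the package):

    // Hypothetical consumer of useTTSOutput.
    function useSpeakToggle(backendUrl) {
      const { speak, stop, isSpeaking, error } = useTTSOutput({ backendUrl });
      // speak() refuses overlapping playback ("Already playing, stop first"),
      // so toggle between speaking and stopping.
      const toggle = (text) => (isSpeaking ? stop() : speak(text));
      return { toggle, isSpeaking, error };
    }
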
@@ -2730,80 +2913,176 @@ var ModelSelector = ({
     ] }, provider)) })
   ] });
 };
-var getSpeechRecognition = () => {
-  if (typeof window === "undefined") return null;
-  return window.SpeechRecognition || window.webkitSpeechRecognition || null;
+var isMediaRecorderSupported = () => {
+  if (typeof window === "undefined") return false;
+  return !!(navigator.mediaDevices && typeof navigator.mediaDevices.getUserMedia === "function" && (window.AudioContext || window.webkitAudioContext));
 };
-function useVoiceInput(options = {}) {
-  const { lang, silenceTimeoutMs } = options;
-  const [supported] = useState(() => getSpeechRecognition() !== null);
+function useVoiceInput(options) {
+  const { backendUrl, silenceTimeoutMs } = options;
+  const [supported] = useState(() => isMediaRecorderSupported());
   const [isRecording, setIsRecording] = useState(false);
   const [transcript, setTranscript] = useState("");
-  const recognitionRef = useRef(null);
+  const [error, setError] = useState(null);
+  const wsRef = useRef(null);
+  const streamRef = useRef(null);
+  const audioContextRef = useRef(null);
+  const processorRef = useRef(null);
   const silenceTimerRef = useRef(null);
-  const finalTranscriptRef = useRef("");
+  const transcriptRef = useRef("");
+  const interimRef = useRef("");
+  const isRecordingRef = useRef(false);
   const clearSilenceTimer = useCallback(() => {
     if (silenceTimerRef.current) {
       clearTimeout(silenceTimerRef.current);
       silenceTimerRef.current = null;
     }
   }, []);
-  const stop = useCallback(() => {
+  const cleanup = useCallback(() => {
     clearSilenceTimer();
-    if (recognitionRef.current) {
-      recognitionRef.current.stop();
+    isRecordingRef.current = false;
+    if (interimRef.current) {
+      transcriptRef.current += interimRef.current + " ";
+      setTranscript(transcriptRef.current.trim());
+      interimRef.current = "";
+    }
+    if (wsRef.current) {
+      try {
+        if (wsRef.current.readyState === WebSocket.OPEN) {
+          wsRef.current.send(JSON.stringify({ type: "stop" }));
+        }
+        wsRef.current.close();
+      } catch (e) {
+      }
+      wsRef.current = null;
+    }
+    if (processorRef.current) {
+      processorRef.current.disconnect();
+      processorRef.current = null;
+    }
+    if (audioContextRef.current) {
+      audioContextRef.current.close();
+      audioContextRef.current = null;
     }
+    if (streamRef.current) {
+      streamRef.current.getTracks().forEach((track) => track.stop());
+      streamRef.current = null;
+    }
+    setIsRecording(false);
   }, [clearSilenceTimer]);
+  const stop = useCallback(() => {
+    cleanup();
+  }, [cleanup]);
   const clear = useCallback(() => {
     setTranscript("");
-    finalTranscriptRef.current = "";
+    transcriptRef.current = "";
+    setError(null);
   }, []);
-  const start = useCallback(() => {
-    const SpeechRecognition = getSpeechRecognition();
-    if (!SpeechRecognition) return;
-    if (recognitionRef.current) {
-      recognitionRef.current.abort();
-    }
-    finalTranscriptRef.current = "";
-    setTranscript("");
-    const recognition = new SpeechRecognition();
-    recognition.continuous = true;
-    recognition.interimResults = true;
-    recognition.lang = lang || navigator.language || "en-US";
-    recognition.onresult = (event) => {
-      let interim = "";
-      let final = "";
-      for (let i = 0; i < event.results.length; i++) {
-        const result = event.results[i];
-        if (result.isFinal) {
-          final += result[0].transcript;
-        } else {
-          interim += result[0].transcript;
-        }
+  const startAudioCapture = useCallback(() => {
+    if (!streamRef.current || !wsRef.current) return;
+    audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)({ sampleRate: 24e3 });
+    const source = audioContextRef.current.createMediaStreamSource(
+      streamRef.current
+    );
+    processorRef.current = audioContextRef.current.createScriptProcessor(
+      4096,
+      1,
+      1
+    );
+    processorRef.current.onaudioprocess = (event) => {
+      if (!isRecordingRef.current || !wsRef.current || wsRef.current.readyState !== WebSocket.OPEN) {
+        return;
       }
-      finalTranscriptRef.current = final;
-      setTranscript(final + interim);
-      if (silenceTimeoutMs) {
-        clearSilenceTimer();
-        silenceTimerRef.current = setTimeout(() => {
-          stop();
-        }, silenceTimeoutMs);
+      const inputData = event.inputBuffer.getChannelData(0);
+      const pcm16 = new Int16Array(inputData.length);
+      for (let i = 0; i < inputData.length; i++) {
+        const s = Math.max(-1, Math.min(1, inputData[i]));
+        pcm16[i] = s < 0 ? s * 32768 : s * 32767;
       }
-    };
-    recognition.onerror = (event) => {
-      if (event.error !== "aborted") {
-        console.warn("[Crow Voice] Speech recognition error:", event.error);
+      const bytes = new Uint8Array(pcm16.buffer);
+      let binary = "";
+      for (let i = 0; i < bytes.length; i++) {
+        binary += String.fromCharCode(bytes[i]);
       }
-      setIsRecording(false);
-    };
-    recognition.onend = () => {
-      setIsRecording(false);
-      recognitionRef.current = null;
+      wsRef.current.send(
+        JSON.stringify({ type: "audio", data: btoa(binary) })
+      );
     };
-    recognitionRef.current = recognition;
-    recognition.start();
-    setIsRecording(true);
-  }, [lang, silenceTimeoutMs, clearSilenceTimer, stop]);
+    source.connect(processorRef.current);
+    processorRef.current.connect(audioContextRef.current.destination);
+  }, []);
+  const start = useCallback(async () => {
+    if (!supported) {
+      setError("Audio recording not supported in this browser");
+      return;
+    }
+    setError(null);
+    transcriptRef.current = "";
+    setTranscript("");
+    try {
+      streamRef.current = await navigator.mediaDevices.getUserMedia({
+        audio: {
+          echoCancellation: true,
+          noiseSuppression: true,
+          sampleRate: 24e3
+        }
+      });
+      const wsProtocol = backendUrl.startsWith("https") ? "wss" : "ws";
+      const wsHost = backendUrl.replace(/^https?:\/\//, "");
+      const wsUrl = `${wsProtocol}://${wsHost}/api/stt/stream`;
+      wsRef.current = new WebSocket(wsUrl);
+      wsRef.current.onopen = () => {
+        wsRef.current?.send(JSON.stringify({ type: "setup" }));
+      };
+      wsRef.current.onmessage = (event) => {
+        const msg = JSON.parse(event.data);
+        if (msg.type === "ready") {
+          startAudioCapture();
+          isRecordingRef.current = true;
+          setIsRecording(true);
+        } else if (msg.type === "transcript") {
+          if (msg.is_final && msg.text) {
+            transcriptRef.current += msg.text + " ";
+            interimRef.current = "";
+            setTranscript(transcriptRef.current.trim());
+            if (silenceTimeoutMs) {
+              clearSilenceTimer();
+              silenceTimerRef.current = setTimeout(() => {
+                stop();
+              }, silenceTimeoutMs);
+            }
+          } else if (!msg.is_final && msg.text) {
+            interimRef.current = msg.text;
+            setTranscript((transcriptRef.current + msg.text).trim());
+          }
+        } else if (msg.type === "error") {
+          setError(msg.message || "STT error");
+          cleanup();
+        }
+      };
+      wsRef.current.onerror = () => {
+        setError("WebSocket connection error");
+        cleanup();
+      };
+      wsRef.current.onclose = () => {
+        if (isRecordingRef.current) {
+          cleanup();
+        }
+      };
+    } catch (err) {
+      setError(
+        err instanceof Error ? err.message : "Failed to start recording"
+      );
+      cleanup();
+    }
+  }, [
+    supported,
+    backendUrl,
+    startAudioCapture,
+    silenceTimeoutMs,
+    clearSilenceTimer,
+    stop,
+    cleanup
+  ]);
   const toggle = useCallback(() => {
     if (isRecording) {
       stop();
@@ -2813,13 +3092,19 @@ function useVoiceInput(options = {}) {
   }, [isRecording, start, stop]);
   useEffect(() => {
     return () => {
-      clearSilenceTimer();
-      if (recognitionRef.current) {
-        recognitionRef.current.abort();
-      }
+      cleanup();
     };
-  }, [clearSilenceTimer]);
-  return { supported, isRecording, transcript, start, stop, toggle, clear };
+  }, [cleanup]);
+  return {
+    supported,
+    isRecording,
+    transcript,
+    error,
+    start,
+    stop,
+    toggle,
+    clear
+  };
 }
 var Textarea = React3.forwardRef(
   ({ className, ...props }, ref) => /* @__PURE__ */ jsx(
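
Note: these two hunks replace the Web Speech API (`window.SpeechRecognition`) with backend speech-to-text: the hook now captures microphone audio via `getUserMedia`, converts it to 24 kHz 16-bit PCM in a `ScriptProcessorNode`, and streams base64 chunks to a WebSocket at `/api/stt/stream`, which returns interim and final transcripts. The breaking changes: `options` is now required (`backendUrl` replaces `lang`), `supported` reflects `getUserMedia` plus Web Audio availability, and the return value gains an `error` field. A migration sketch (the URL is illustrative):

    // Before (0.1.57): const voice = useVoiceInput({ lang: "en-US" });
    // After (0.1.59): a backend URL is required.
    const voice = useVoiceInput({
      backendUrl: "https://api.example.com", // illustrative
      silenceTimeoutMs: 1500
    });
    // `error` surfaces mic/WebSocket failures the old hook only logged.
    if (voice.error) console.warn("[voice]", voice.error);
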
@@ -3013,11 +3298,23 @@ var PromptInputBox = React3.forwardRef(
     selectedModel = "gpt-4o",
     onModelChange,
     availableModels = [],
-    highlighted = false
+    highlighted = false,
+    backendUrl = "",
+    triggerVoiceRecording = 0
   }, ref) => {
     const [input, setInput] = React3.useState("");
     const promptBoxRef = React3.useRef(null);
-    const voice = useVoiceInput();
+    const voice = useVoiceInput({ backendUrl, silenceTimeoutMs: 1500 });
+    const lastTriggerRef = React3.useRef(0);
+    const voiceRef = React3.useRef(voice);
+    voiceRef.current = voice;
+    React3.useEffect(() => {
+      if (triggerVoiceRecording > 0 && triggerVoiceRecording !== lastTriggerRef.current) {
+        console.log("[Voice] Auto-starting recording from trigger");
+        voiceRef.current.start();
+      }
+      lastTriggerRef.current = triggerVoiceRecording;
+    }, [triggerVoiceRecording]);
     React3.useEffect(() => {
       if (voice.isRecording && voice.transcript) {
         setInput(voice.transcript);
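
Note: `PromptInputBox` gains two props: `backendUrl`, forwarded to `useVoiceInput` with a fixed 1.5 s silence timeout, and `triggerVoiceRecording`, an increasing counter; each time the parent passes a larger value, the effect above auto-starts recording once. A sketch of the parent-side wiring (names are illustrative):

    // Hypothetical parent component state:
    const [voiceTrigger, setVoiceTrigger] = React3.useState(0);
    const startVoiceInput = () => setVoiceTrigger((t) => t + 1);
    // Rendered as: jsx(PromptInputBox, { backendUrl: apiUrl,
    //   triggerVoiceRecording: voiceTrigger, ... })
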
@@ -3026,11 +3323,16 @@ var PromptInputBox = React3.forwardRef(
     const wasRecordingRef = React3.useRef(false);
     React3.useEffect(() => {
       if (wasRecordingRef.current && !voice.isRecording && voice.transcript) {
-        setInput(voice.transcript);
+        const messageToSend = voice.transcript.trim();
+        if (messageToSend) {
+          console.log("[Voice] Auto-sending:", messageToSend);
+          onSend(messageToSend);
+          setInput("");
+        }
         voice.clear();
       }
       wasRecordingRef.current = voice.isRecording;
-    }, [voice.isRecording, voice.transcript, voice.clear]);
+    }, [voice.isRecording, voice.transcript, voice.clear, onSend]);
     const handleSubmit = () => {
       if (input.trim()) {
         if (voice.isRecording) {
@@ -3641,6 +3943,25 @@ function CrowWidget({
       setShouldRestoreHistory(true);
     }
   });
+  const tts = useTTSOutput({ backendUrl: apiUrl });
+  const ttsRef = useRef(tts);
+  ttsRef.current = tts;
+  const wasLoadingRef = useRef(false);
+  useEffect(() => {
+    console.log("[Crow TTS] isLoading changed:", chat.isLoading, "wasLoading:", wasLoadingRef.current);
+    if (wasLoadingRef.current && !chat.isLoading) {
+      const lastMessage = [...chat.messages].reverse().find((m) => m.isBot);
+      console.log("[Crow TTS] Last bot message:", lastMessage?.content?.substring(0, 50));
+      if (lastMessage?.content) {
+        const textToSpeak = lastMessage.content.replace(/\*\*/g, "").replace(/\*/g, "").replace(/`[^`]+`/g, "").replace(/\[([^\]]+)\]\([^)]+\)/g, "$1").trim();
+        if (textToSpeak) {
+          console.log("[Crow TTS] Speaking:", textToSpeak.substring(0, 50));
+          ttsRef.current.speak(textToSpeak);
+        }
+      }
+    }
+    wasLoadingRef.current = chat.isLoading;
+  }, [chat.isLoading, chat.messages]);
   useEffect(() => {
     if (initialSuggestions.length > 0 && chat.suggestedActions.length === 0) {
       chat.setSuggestedActions(initialSuggestions);
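
Note: when a chat response finishes (`isLoading` falls from true to false), the widget now speaks the latest bot message, first stripping basic markdown so the TTS engine does not read formatting aloud. Illustratively, the replace chain above transforms a reply like this:

    const reply = "**Done!** See [the docs](https://example.com) for more.";
    const spoken = reply
      .replace(/\*\*/g, "")                      // bold markers
      .replace(/\*/g, "")                        // italics markers
      .replace(/`[^`]+`/g, "")                   // inline code, removed entirely
      .replace(/\[([^\]]+)\]\([^)]+\)/g, "$1")   // links -> link text
      .trim();
    // spoken === "Done! See the docs for more."
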
@@ -3680,7 +4001,15 @@ function CrowWidget({
   const { executeClientTool } = useCrowAPI({
     onIdentified: async () => {
       setIsVerifiedUser(true);
-      await conversations.loadConversations();
+      const convs = await conversations.loadConversations();
+      if (convs.length > 0) {
+        const mostRecent = convs[0];
+        const historyMessages = await conversations.loadConversationHistory(mostRecent.id);
+        if (historyMessages.length > 0) {
+          chat.loadMessages(historyMessages);
+          chat.setConversationId(mostRecent.id);
+        }
+      }
     },
     onReset: () => {
       setIsVerifiedUser(false);
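
Note: `onIdentified` now does more than refresh the conversation list: it uses the new return value of `loadConversations` to restore the most recent conversation (the list is assumed to be ordered newest-first) into the chat. The flow, as a standalone sketch:

    // Illustrative restatement of the restore flow above.
    async function restoreMostRecentConversation(conversations, chat) {
      const convs = await conversations.loadConversations();
      if (convs.length === 0) return; // nothing to restore
      const mostRecent = convs[0]; // assumes newest-first ordering
      const history = await conversations.loadConversationHistory(mostRecent.id);
      if (history.length > 0) {
        chat.loadMessages(history);
        chat.setConversationId(mostRecent.id);
      }
    }
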
@@ -4035,7 +4364,8 @@ function CrowWidget({
         isLoading: chat.isLoading,
         showStopButton: isBrowserUseActive || !!askUserResolver || !!pendingConfirmation,
         highlighted: !!askUserResolver,
-        className: "crow-backdrop-blur-md"
+        className: "crow-backdrop-blur-md",
+        backendUrl: apiUrl
       }
     )
   ] })