@paymanai/payman-typescript-ask-sdk 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -17,6 +17,7 @@ bun add @paymanai/payman-typescript-ask-sdk
17
17
  ## Features
18
18
 
19
19
  - ✅ **`useChat` Hook** - React hook for managing chat state and streaming
20
+ - ✅ **`useVoice` Hook** - React hook for voice recognition (web and mobile)
20
21
  - ✅ **Streaming Client** - Low-level streaming utilities
21
22
  - ✅ **TypeScript Types** - Full type definitions
22
23
  - ✅ **Cross-Platform** - Works in web and React Native
@@ -98,6 +99,48 @@ await streamWorkflowEvents(
98
99
  );
99
100
  ```
100
101
 
102
+ ## Voice Support
103
+
104
+ The SDK includes built-in voice recognition for both web and mobile platforms.
105
+
106
+ ```typescript
107
+ import { useVoice } from '@paymanai/payman-typescript-ask-sdk';
108
+
109
+ function MyChat() {
110
+ const {
111
+ voiceState,
112
+ transcribedText,
113
+ isAvailable,
114
+ isRecording,
115
+ startRecording,
116
+ stopRecording,
117
+ } = useVoice(
118
+ { lang: 'en-US' },
119
+ {
120
+ onResult: (transcript) => console.log('Transcript:', transcript),
121
+ }
122
+ );
123
+
124
+ return (
125
+ <div>
126
+ <button onClick={startRecording} disabled={!isAvailable || isRecording}>
127
+ Start Voice
128
+ </button>
129
+ <button onClick={stopRecording} disabled={!isRecording}>
130
+ Stop Voice
131
+ </button>
132
+ <p>{transcribedText}</p>
133
+ </div>
134
+ );
135
+ }
136
+ ```
137
+
138
+ **Platform Support:**
139
+ - **Web**: Uses browser's Web Speech API (Chrome, Edge, Safari)
140
+ - **React Native**: Uses `expo-speech-recognition` (iOS & Android). You must install it in your app: `npm install expo-speech-recognition` (or `yarn add expo-speech-recognition`). If the package is not installed, the voice button will show but `isAvailable` will be false and no permissions are requested.
141
+
142
+ **Voice UI layout (Ask UI / custom UIs):** When voice is enabled, show the voice control **beside** the send button (e.g. voice on the left, send on the right), not replacing it. Both should be visible so users can send text or use voice.
143
+
101
144
  ## API Reference
102
145
 
103
146
  ### `useChat(options)`
@@ -121,6 +164,36 @@ React hook for managing chat state.
121
164
  - `cancelStream: () => void` - Cancel current stream
122
165
  - `isWaitingForResponse: boolean` - Loading state
123
166
 
167
+ ### `useVoice(config?, callbacks?)`
168
+
169
+ React hook for voice recognition.
170
+
171
+ **Parameters:**
172
+ - `config?: VoiceConfig` - Voice configuration
173
+ - `lang?: string` - Language (default: "en-US")
174
+ - `interimResults?: boolean` - Enable interim results (default: true)
175
+ - `continuous?: boolean` - Continuous mode (default: true)
176
+ - `maxAlternatives?: number` - Max alternatives (default: 1)
177
+ - `requiresOnDeviceRecognition?: boolean` - Require on-device recognition (mobile only, default: false)
+ - `autoStopAfterSilence?: number` - Auto-stop after silence in ms (web only)
178
+ - `callbacks?: VoiceCallbacks` - Event callbacks
179
+ - `onStart?: () => void` - Recording started
180
+ - `onEnd?: () => void` - Recording ended
181
+ - `onResult?: (transcript: string) => void` - New transcript
182
+ - `onError?: (error: string) => void` - Error occurred
183
+ - `onStateChange?: (state: VoiceState) => void` - State changed
184
+
185
+ **Returns:**
186
+ - `voiceState: VoiceState` - Current state ("idle" | "listening" | "processing" | "error")
187
+ - `transcribedText: string` - Current transcribed text
188
+ - `isAvailable: boolean` - Is voice available on this device/browser
189
+ - `isRecording: boolean` - Is currently recording
190
+ - `startRecording: () => Promise<void>` - Start voice recording
191
+ - `stopRecording: () => void` - Stop voice recording
192
+ - `requestPermissions?: () => Promise<VoicePermissions>` - Request mic permissions (may be absent on some platforms)
193
+ - `getPermissions?: () => Promise<VoicePermissions>` - Check mic permissions (may be absent on some platforms)
194
+ - `clearTranscript: () => void` - Clear transcribed text
195
+ - `reset: () => void` - Reset voice state
196
+
124
197
  ### `streamWorkflowEvents(url, body, headers, options)`
125
198
 
126
199
  Low-level streaming function.
@@ -141,11 +214,18 @@ All types are exported:
141
214
 
142
215
  ```typescript
143
216
  import type {
217
+ // Chat types
144
218
  ChatConfig,
145
219
  ChatCallbacks,
146
220
  MessageDisplay,
147
221
  StreamingStep,
148
222
  WorkflowStage,
223
+ // Voice types
224
+ VoiceConfig,
225
+ VoiceCallbacks,
226
+ VoiceState,
227
+ VoicePermissions,
228
+ UseVoiceReturn,
149
229
  // ... and more
150
230
  } from '@paymanai/payman-typescript-ask-sdk';
151
231
  ```
package/dist/index.d.mts CHANGED
@@ -1,5 +1,73 @@
1
1
  import React from 'react';
2
2
 
3
+ type VoiceState = "idle" | "listening" | "processing" | "error";
4
+ type VoiceConfig = {
5
+ /** Language for speech recognition (default: "en-US") */
6
+ lang?: string;
7
+ /** Enable interim results during transcription (default: true) */
8
+ interimResults?: boolean;
9
+ /** Continuous recognition mode (default: true) */
10
+ continuous?: boolean;
11
+ /** Maximum number of alternative transcriptions (default: 1) */
12
+ maxAlternatives?: number;
13
+ /** Require on-device recognition (mobile only, default: false) */
14
+ requiresOnDeviceRecognition?: boolean;
15
+ /** Auto-stop after silence in milliseconds (web only, default: undefined) */
16
+ autoStopAfterSilence?: number;
17
+ };
18
+ type VoicePermissions = {
19
+ /** Permission granted */
20
+ granted: boolean;
21
+ /** Can ask for permission */
22
+ canAskAgain?: boolean;
23
+ /** Permission status */
24
+ status?: "granted" | "denied" | "undetermined";
25
+ };
26
+ type VoiceCallbacks = {
27
+ /** Called when speech recognition starts */
28
+ onStart?: () => void;
29
+ /** Called when speech recognition ends */
30
+ onEnd?: () => void;
31
+ /** Called when transcription result is received */
32
+ onResult?: (transcript: string) => void;
33
+ /** Called when an error occurs */
34
+ onError?: (error: string) => void;
35
+ /** Called when voice state changes */
36
+ onStateChange?: (state: VoiceState) => void;
37
+ };
38
+ type VoiceResult = {
39
+ /** Current transcribed text */
40
+ transcript: string;
41
+ /** Is final result (not interim) */
42
+ isFinal: boolean;
43
+ /** Confidence score (0-1, web only) */
44
+ confidence?: number;
45
+ };
46
+ type UseVoiceReturn = {
47
+ /** Current voice state */
48
+ voiceState: VoiceState;
49
+ /** Current transcribed text */
50
+ transcribedText: string;
51
+ /** Is speech recognition available on this device/browser */
52
+ isAvailable: boolean;
53
+ /** Is currently recording */
54
+ isRecording: boolean;
55
+ /** Last error message from speech recognition (e.g. "audio not available"); cleared when recording starts again */
56
+ lastError?: string | null;
57
+ /** Start voice recording */
58
+ startRecording: () => Promise<void>;
59
+ /** Stop voice recording */
60
+ stopRecording: () => void;
61
+ /** Request microphone permissions */
62
+ requestPermissions?: () => Promise<VoicePermissions>;
63
+ /** Check microphone permissions */
64
+ getPermissions?: () => Promise<VoicePermissions>;
65
+ /** Clear transcribed text */
66
+ clearTranscript: () => void;
67
+ /** Reset voice state */
68
+ reset: () => void;
69
+ };
70
+
3
71
  type MessageRole = "user" | "assistant" | "system";
4
72
  type StreamProgress = "started" | "processing" | "completed" | "error";
5
73
  type WorkflowStage = "DEV" | "SANDBOX" | "PROD" | "ARCHIVED";
@@ -165,6 +233,38 @@ type UseChatReturn = {
165
233
  };
166
234
  declare function useChat(config: ChatConfig, callbacks?: ChatCallbacks): UseChatReturn;
167
235
 
236
+ interface SpeechRecognitionEvent extends Event {
237
+ results: SpeechRecognitionResultList;
238
+ resultIndex: number;
239
+ }
240
+ interface SpeechRecognitionErrorEvent extends Event {
241
+ error: string;
242
+ message?: string;
243
+ }
244
+ interface SpeechRecognition extends EventTarget {
245
+ continuous: boolean;
246
+ interimResults: boolean;
247
+ lang: string;
248
+ maxAlternatives: number;
249
+ start(): void;
250
+ stop(): void;
251
+ abort(): void;
252
+ onstart: ((this: SpeechRecognition, ev: Event) => any) | null;
253
+ onend: ((this: SpeechRecognition, ev: Event) => any) | null;
254
+ onresult: ((this: SpeechRecognition, ev: SpeechRecognitionEvent) => any) | null;
255
+ onerror: ((this: SpeechRecognition, ev: SpeechRecognitionErrorEvent) => any) | null;
256
+ }
257
+ interface SpeechRecognitionConstructor {
258
+ new (): SpeechRecognition;
259
+ }
260
+ declare global {
261
+ interface Window {
262
+ SpeechRecognition?: SpeechRecognitionConstructor;
263
+ webkitSpeechRecognition?: SpeechRecognitionConstructor;
264
+ }
265
+ }
266
+ declare function useVoice(config?: VoiceConfig, callbacks?: VoiceCallbacks): UseVoiceReturn;
267
+
168
268
  /**
169
269
  * Cross-platform UUID v4 generator
170
270
  * Works in both browser and React Native environments
@@ -220,4 +320,4 @@ declare function cancelUserAction(config: ChatConfig, userActionId: string): Pro
220
320
  */
221
321
  declare function resendUserAction(config: ChatConfig, userActionId: string): Promise<UserActionResponse>;
222
322
 
223
- export { type APIConfig, type ChatCallbacks, type ChatConfig, type ChunkDisplay, type MessageDisplay, type MessageRole, type SessionParams, type StreamEvent, type StreamOptions, type StreamProgress, type StreamingStep, type UseChatReturn, type UserActionRequest, type UserActionResult, type UserActionState, type WorkflowStage, cancelUserAction, generateId, resendUserAction, streamWorkflowEvents, submitUserAction, useChat };
323
+ export { type APIConfig, type ChatCallbacks, type ChatConfig, type ChunkDisplay, type MessageDisplay, type MessageRole, type SessionParams, type StreamEvent, type StreamOptions, type StreamProgress, type StreamingStep, type UseChatReturn, type UseVoiceReturn, type UserActionRequest, type UserActionResult, type UserActionState, type VoiceCallbacks, type VoiceConfig, type VoicePermissions, type VoiceResult, type VoiceState, type WorkflowStage, cancelUserAction, generateId, resendUserAction, streamWorkflowEvents, submitUserAction, useChat, useVoice };
package/dist/index.d.ts CHANGED
@@ -1,5 +1,73 @@
1
1
  import React from 'react';
2
2
 
3
+ type VoiceState = "idle" | "listening" | "processing" | "error";
4
+ type VoiceConfig = {
5
+ /** Language for speech recognition (default: "en-US") */
6
+ lang?: string;
7
+ /** Enable interim results during transcription (default: true) */
8
+ interimResults?: boolean;
9
+ /** Continuous recognition mode (default: true) */
10
+ continuous?: boolean;
11
+ /** Maximum number of alternative transcriptions (default: 1) */
12
+ maxAlternatives?: number;
13
+ /** Require on-device recognition (mobile only, default: false) */
14
+ requiresOnDeviceRecognition?: boolean;
15
+ /** Auto-stop after silence in milliseconds (web only, default: undefined) */
16
+ autoStopAfterSilence?: number;
17
+ };
18
+ type VoicePermissions = {
19
+ /** Permission granted */
20
+ granted: boolean;
21
+ /** Can ask for permission */
22
+ canAskAgain?: boolean;
23
+ /** Permission status */
24
+ status?: "granted" | "denied" | "undetermined";
25
+ };
26
+ type VoiceCallbacks = {
27
+ /** Called when speech recognition starts */
28
+ onStart?: () => void;
29
+ /** Called when speech recognition ends */
30
+ onEnd?: () => void;
31
+ /** Called when transcription result is received */
32
+ onResult?: (transcript: string) => void;
33
+ /** Called when an error occurs */
34
+ onError?: (error: string) => void;
35
+ /** Called when voice state changes */
36
+ onStateChange?: (state: VoiceState) => void;
37
+ };
38
+ type VoiceResult = {
39
+ /** Current transcribed text */
40
+ transcript: string;
41
+ /** Is final result (not interim) */
42
+ isFinal: boolean;
43
+ /** Confidence score (0-1, web only) */
44
+ confidence?: number;
45
+ };
46
+ type UseVoiceReturn = {
47
+ /** Current voice state */
48
+ voiceState: VoiceState;
49
+ /** Current transcribed text */
50
+ transcribedText: string;
51
+ /** Is speech recognition available on this device/browser */
52
+ isAvailable: boolean;
53
+ /** Is currently recording */
54
+ isRecording: boolean;
55
+ /** Last error message from speech recognition (e.g. "audio not available"); cleared when recording starts again */
56
+ lastError?: string | null;
57
+ /** Start voice recording */
58
+ startRecording: () => Promise<void>;
59
+ /** Stop voice recording */
60
+ stopRecording: () => void;
61
+ /** Request microphone permissions */
62
+ requestPermissions?: () => Promise<VoicePermissions>;
63
+ /** Check microphone permissions */
64
+ getPermissions?: () => Promise<VoicePermissions>;
65
+ /** Clear transcribed text */
66
+ clearTranscript: () => void;
67
+ /** Reset voice state */
68
+ reset: () => void;
69
+ };
70
+
3
71
  type MessageRole = "user" | "assistant" | "system";
4
72
  type StreamProgress = "started" | "processing" | "completed" | "error";
5
73
  type WorkflowStage = "DEV" | "SANDBOX" | "PROD" | "ARCHIVED";
@@ -165,6 +233,38 @@ type UseChatReturn = {
165
233
  };
166
234
  declare function useChat(config: ChatConfig, callbacks?: ChatCallbacks): UseChatReturn;
167
235
 
236
+ interface SpeechRecognitionEvent extends Event {
237
+ results: SpeechRecognitionResultList;
238
+ resultIndex: number;
239
+ }
240
+ interface SpeechRecognitionErrorEvent extends Event {
241
+ error: string;
242
+ message?: string;
243
+ }
244
+ interface SpeechRecognition extends EventTarget {
245
+ continuous: boolean;
246
+ interimResults: boolean;
247
+ lang: string;
248
+ maxAlternatives: number;
249
+ start(): void;
250
+ stop(): void;
251
+ abort(): void;
252
+ onstart: ((this: SpeechRecognition, ev: Event) => any) | null;
253
+ onend: ((this: SpeechRecognition, ev: Event) => any) | null;
254
+ onresult: ((this: SpeechRecognition, ev: SpeechRecognitionEvent) => any) | null;
255
+ onerror: ((this: SpeechRecognition, ev: SpeechRecognitionErrorEvent) => any) | null;
256
+ }
257
+ interface SpeechRecognitionConstructor {
258
+ new (): SpeechRecognition;
259
+ }
260
+ declare global {
261
+ interface Window {
262
+ SpeechRecognition?: SpeechRecognitionConstructor;
263
+ webkitSpeechRecognition?: SpeechRecognitionConstructor;
264
+ }
265
+ }
266
+ declare function useVoice(config?: VoiceConfig, callbacks?: VoiceCallbacks): UseVoiceReturn;
267
+
168
268
  /**
169
269
  * Cross-platform UUID v4 generator
170
270
  * Works in both browser and React Native environments
@@ -220,4 +320,4 @@ declare function cancelUserAction(config: ChatConfig, userActionId: string): Pro
220
320
  */
221
321
  declare function resendUserAction(config: ChatConfig, userActionId: string): Promise<UserActionResponse>;
222
322
 
223
- export { type APIConfig, type ChatCallbacks, type ChatConfig, type ChunkDisplay, type MessageDisplay, type MessageRole, type SessionParams, type StreamEvent, type StreamOptions, type StreamProgress, type StreamingStep, type UseChatReturn, type UserActionRequest, type UserActionResult, type UserActionState, type WorkflowStage, cancelUserAction, generateId, resendUserAction, streamWorkflowEvents, submitUserAction, useChat };
323
+ export { type APIConfig, type ChatCallbacks, type ChatConfig, type ChunkDisplay, type MessageDisplay, type MessageRole, type SessionParams, type StreamEvent, type StreamOptions, type StreamProgress, type StreamingStep, type UseChatReturn, type UseVoiceReturn, type UserActionRequest, type UserActionResult, type UserActionState, type VoiceCallbacks, type VoiceConfig, type VoicePermissions, type VoiceResult, type VoiceState, type WorkflowStage, cancelUserAction, generateId, resendUserAction, streamWorkflowEvents, submitUserAction, useChat, useVoice };
package/dist/index.js CHANGED
@@ -873,6 +873,221 @@ function useChat(config, callbacks = {}) {
873
873
  resendOtp
874
874
  };
875
875
  }
876
+ function getSpeechRecognition() {
877
+ if (typeof window === "undefined") return null;
878
+ return window.SpeechRecognition || window.webkitSpeechRecognition || null;
879
+ }
880
+ function useVoice(config = {}, callbacks = {}) {
881
+ const [voiceState, setVoiceState] = react.useState("idle");
882
+ const [transcribedText, setTranscribedText] = react.useState("");
883
+ const [isAvailable, setIsAvailable] = react.useState(false);
884
+ const [isRecording, setIsRecording] = react.useState(false);
885
+ const recognitionRef = react.useRef(null);
886
+ const autoStopTimerRef = react.useRef(null);
887
+ const {
888
+ lang = "en-US",
889
+ interimResults = true,
890
+ continuous = true,
891
+ maxAlternatives = 1,
892
+ autoStopAfterSilence
893
+ } = config;
894
+ const { onStart, onEnd, onResult, onError, onStateChange } = callbacks;
895
+ react.useEffect(() => {
896
+ const SpeechRecognitionAPI = getSpeechRecognition();
897
+ setIsAvailable(SpeechRecognitionAPI !== null);
898
+ }, []);
899
+ react.useEffect(() => {
900
+ onStateChange?.(voiceState);
901
+ }, [voiceState, onStateChange]);
902
+ const requestPermissions = react.useCallback(async () => {
903
+ try {
904
+ const result = await navigator.mediaDevices.getUserMedia({
905
+ audio: true
906
+ });
907
+ result.getTracks().forEach((track) => track.stop());
908
+ return {
909
+ granted: true,
910
+ status: "granted"
911
+ };
912
+ } catch (error) {
913
+ return {
914
+ granted: false,
915
+ status: "denied"
916
+ };
917
+ }
918
+ }, []);
919
+ const getPermissions = react.useCallback(async () => {
920
+ if (typeof navigator === "undefined" || !navigator.permissions) {
921
+ return {
922
+ granted: false,
923
+ status: "undetermined"
924
+ };
925
+ }
926
+ try {
927
+ const result = await navigator.permissions.query({
928
+ name: "microphone"
929
+ });
930
+ return {
931
+ granted: result.state === "granted",
932
+ status: result.state === "granted" ? "granted" : result.state === "denied" ? "denied" : "undetermined"
933
+ };
934
+ } catch {
935
+ return {
936
+ granted: false,
937
+ status: "undetermined"
938
+ };
939
+ }
940
+ }, []);
941
+ const clearAutoStopTimer = react.useCallback(() => {
942
+ if (autoStopTimerRef.current) {
943
+ clearTimeout(autoStopTimerRef.current);
944
+ autoStopTimerRef.current = null;
945
+ }
946
+ }, []);
947
+ const resetAutoStopTimer = react.useCallback(() => {
948
+ clearAutoStopTimer();
949
+ if (autoStopAfterSilence && autoStopAfterSilence > 0) {
950
+ autoStopTimerRef.current = setTimeout(() => {
951
+ if (recognitionRef.current && isRecording) {
952
+ recognitionRef.current.stop();
953
+ }
954
+ }, autoStopAfterSilence);
955
+ }
956
+ }, [autoStopAfterSilence, clearAutoStopTimer, isRecording]);
957
+ const stopRecording = react.useCallback(() => {
958
+ if (recognitionRef.current) {
959
+ try {
960
+ recognitionRef.current.stop();
961
+ } catch (error) {
962
+ console.warn("Error stopping speech recognition:", error);
963
+ }
964
+ }
965
+ clearAutoStopTimer();
966
+ setIsRecording(false);
967
+ setVoiceState("idle");
968
+ }, [clearAutoStopTimer]);
969
+ const startRecording = react.useCallback(async () => {
970
+ const SpeechRecognitionAPI = getSpeechRecognition();
971
+ if (!SpeechRecognitionAPI) {
972
+ onError?.("Speech recognition not supported in this browser");
973
+ return;
974
+ }
975
+ try {
976
+ try {
977
+ await navigator.mediaDevices.getUserMedia({ audio: true });
978
+ } catch (permError) {
979
+ onError?.("Microphone access denied. Please allow microphone access in your browser settings.");
980
+ return;
981
+ }
982
+ const recognition = new SpeechRecognitionAPI();
983
+ recognition.continuous = continuous;
984
+ recognition.interimResults = interimResults;
985
+ recognition.lang = lang;
986
+ recognition.maxAlternatives = maxAlternatives;
987
+ recognition.onstart = () => {
988
+ setVoiceState("listening");
989
+ setIsRecording(true);
990
+ onStart?.();
991
+ resetAutoStopTimer();
992
+ };
993
+ recognition.onend = () => {
994
+ setVoiceState("idle");
995
+ setIsRecording(false);
996
+ clearAutoStopTimer();
997
+ onEnd?.();
998
+ };
999
+ recognition.onresult = (event) => {
1000
+ const results = event.results;
1001
+ let transcript = "";
1002
+ for (let i = 0; i < results.length; i++) {
1003
+ const result = results[i];
1004
+ if (result && result[0]) {
1005
+ const text = result[0].transcript;
1006
+ if (transcript && !transcript.endsWith(" ") && !text.startsWith(" ")) {
1007
+ transcript += " " + text;
1008
+ } else {
1009
+ transcript += text;
1010
+ }
1011
+ }
1012
+ }
1013
+ transcript = transcript.trim();
1014
+ if (transcript) {
1015
+ setTranscribedText(transcript);
1016
+ onResult?.(transcript);
1017
+ resetAutoStopTimer();
1018
+ }
1019
+ };
1020
+ recognition.onerror = (event) => {
1021
+ setVoiceState("error");
1022
+ setIsRecording(false);
1023
+ clearAutoStopTimer();
1024
+ let errorMessage = event.error;
1025
+ if (event.error === "not-allowed") {
1026
+ errorMessage = "Microphone access denied. Please allow microphone access in your browser settings.";
1027
+ } else if (event.error === "no-speech") {
1028
+ errorMessage = "No speech detected. Please try again.";
1029
+ } else if (event.error === "audio-capture") {
1030
+ errorMessage = "No microphone found or microphone is in use.";
1031
+ } else if (event.error === "network") {
1032
+ errorMessage = "Network error occurred. Please check your connection.";
1033
+ }
1034
+ onError?.(errorMessage);
1035
+ };
1036
+ recognitionRef.current = recognition;
1037
+ setTranscribedText("");
1038
+ recognition.start();
1039
+ } catch (error) {
1040
+ setVoiceState("error");
1041
+ setIsRecording(false);
1042
+ onError?.(
1043
+ error instanceof Error ? error.message : "Failed to start recording"
1044
+ );
1045
+ }
1046
+ }, [
1047
+ lang,
1048
+ interimResults,
1049
+ continuous,
1050
+ maxAlternatives,
1051
+ onStart,
1052
+ onEnd,
1053
+ onResult,
1054
+ onError,
1055
+ getPermissions,
1056
+ resetAutoStopTimer,
1057
+ clearAutoStopTimer
1058
+ ]);
1059
+ const clearTranscript = react.useCallback(() => {
1060
+ setTranscribedText("");
1061
+ }, []);
1062
+ const reset = react.useCallback(() => {
1063
+ stopRecording();
1064
+ setTranscribedText("");
1065
+ setVoiceState("idle");
1066
+ }, [stopRecording]);
1067
+ react.useEffect(() => {
1068
+ return () => {
1069
+ if (recognitionRef.current) {
1070
+ try {
1071
+ recognitionRef.current.stop();
1072
+ } catch {
1073
+ }
1074
+ }
1075
+ clearAutoStopTimer();
1076
+ };
1077
+ }, [clearAutoStopTimer]);
1078
+ return {
1079
+ voiceState,
1080
+ transcribedText,
1081
+ isAvailable,
1082
+ isRecording,
1083
+ startRecording,
1084
+ stopRecording,
1085
+ requestPermissions,
1086
+ getPermissions,
1087
+ clearTranscript,
1088
+ reset
1089
+ };
1090
+ }
876
1091
 
877
1092
  exports.cancelUserAction = cancelUserAction;
878
1093
  exports.generateId = generateId;
@@ -880,5 +1095,6 @@ exports.resendUserAction = resendUserAction;
880
1095
  exports.streamWorkflowEvents = streamWorkflowEvents;
881
1096
  exports.submitUserAction = submitUserAction;
882
1097
  exports.useChat = useChat;
1098
+ exports.useVoice = useVoice;
883
1099
  //# sourceMappingURL=index.js.map
884
1100
  //# sourceMappingURL=index.js.map