react-native-voice-ts 1.0.1 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +287 -77
- package/dist/NativeVoiceAndroid.d.ts +0 -1
- package/dist/NativeVoiceAndroid.js +0 -1
- package/dist/NativeVoiceIOS.d.ts +0 -1
- package/dist/NativeVoiceIOS.js +0 -1
- package/dist/VoiceModuleTypes.d.ts +0 -1
- package/dist/VoiceModuleTypes.js +0 -1
- package/dist/VoiceUtilTypes.d.ts +0 -1
- package/dist/VoiceUtilTypes.js +0 -1
- package/dist/components/MicIcon.d.ts +24 -5
- package/dist/components/MicIcon.js +71 -13
- package/dist/components/VoiceMicrophone.d.ts +12 -1
- package/dist/components/VoiceMicrophone.js +97 -10
- package/dist/components/index.d.ts +1 -2
- package/dist/components/index.js +1 -2
- package/dist/hooks/index.d.ts +0 -1
- package/dist/hooks/index.js +0 -1
- package/dist/hooks/useVoiceRecognition.d.ts +12 -1
- package/dist/hooks/useVoiceRecognition.js +109 -12
- package/dist/index.d.ts +1 -2
- package/dist/index.js +1 -2
- package/package.json +4 -8
- package/CONTRIBUTING.md +0 -293
- package/dist/NativeVoiceAndroid.d.ts.map +0 -1
- package/dist/NativeVoiceAndroid.js.map +0 -1
- package/dist/NativeVoiceIOS.d.ts.map +0 -1
- package/dist/NativeVoiceIOS.js.map +0 -1
- package/dist/VoiceModuleTypes.d.ts.map +0 -1
- package/dist/VoiceModuleTypes.js.map +0 -1
- package/dist/VoiceUtilTypes.d.ts.map +0 -1
- package/dist/VoiceUtilTypes.js.map +0 -1
- package/dist/components/MicIcon.d.ts.map +0 -1
- package/dist/components/MicIcon.js.map +0 -1
- package/dist/components/VoiceMicrophone.d.ts.map +0 -1
- package/dist/components/VoiceMicrophone.js.map +0 -1
- package/dist/components/index.d.ts.map +0 -1
- package/dist/components/index.js.map +0 -1
- package/dist/hooks/index.d.ts.map +0 -1
- package/dist/hooks/index.js.map +0 -1
- package/dist/hooks/useVoiceRecognition.d.ts.map +0 -1
- package/dist/hooks/useVoiceRecognition.js.map +0 -1
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js.map +0 -1
- package/ios/Voice.xcodeproj/project.xcworkspace/xcuserdata/olumayowadaniel.xcuserdatad/UserInterfaceState.xcuserstate +0 -0
- package/ios/Voice.xcodeproj/project.xcworkspace/xcuserdata/rudie_shahinian.xcuserdatad/UserInterfaceState.xcuserstate +0 -0
- package/plugin/src/withVoice.ts +0 -74
- package/plugin/tsconfig.json +0 -10
- package/plugin/tsconfig.tsbuildinfo +0 -1
- package/src/NativeVoiceAndroid.ts +0 -28
- package/src/NativeVoiceIOS.ts +0 -24
- package/src/VoiceModuleTypes.ts +0 -64
- package/src/VoiceUtilTypes.ts +0 -46
- package/src/components/MicIcon.tsx +0 -72
- package/src/components/VoiceMicrophone.tsx +0 -238
- package/src/components/index.ts +0 -4
- package/src/hooks/index.ts +0 -5
- package/src/hooks/useVoiceRecognition.ts +0 -217
- package/src/images/mic.svg +0 -16
- package/src/index.ts +0 -515
package/src/VoiceUtilTypes.ts
DELETED
@@ -1,46 +0,0 @@
-/**
- * React Native Voice - Speech Recognition Utility Types
- *
- * This file contains helper types and interfaces for working with
- * speech recognition functionality.
- */
-
-export interface VoiceOptions {
-  /** Android: Language model type (LANGUAGE_MODEL_FREE_FORM or LANGUAGE_MODEL_WEB_SEARCH) */
-  EXTRA_LANGUAGE_MODEL?: string;
-  /** Android: Maximum number of results */
-  EXTRA_MAX_RESULTS?: number;
-  /** Android: Enable partial results */
-  EXTRA_PARTIAL_RESULTS?: boolean;
-  /** Android: Auto request permissions */
-  REQUEST_PERMISSIONS_AUTO?: boolean;
-  /** iOS: Detection mode (automatic, manual) */
-  iosCategory?: string;
-}
-
-export interface RecognitionStats {
-  /** Duration of recognition in milliseconds */
-  duration: number;
-  /** Whether recognition is currently active */
-  isActive: boolean;
-  /** Last recognized results */
-  lastResults: string[];
-  /** Start timestamp */
-  startTime: number;
-}
-
-export interface PermissionResult {
-  /** Whether permission was granted */
-  granted: boolean;
-  /** Error message if permission check failed */
-  error?: string;
-}
-
-export interface Language {
-  /** Language code (e.g., 'en-US') */
-  code: string;
-  /** Display name */
-  name: string;
-  /** Whether language is available */
-  available: boolean;
-}
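The option keys declared in the removed VoiceOptions interface include the extras that the package's hook and component forward to Voice.start. A minimal usage sketch, assuming the package's root default export is the Voice module (the removed source imports it from ../index); the concrete EXTRA_* values are illustrative only:

```tsx
import Voice from 'react-native-voice-ts';

// Start recognition with recognizer extras drawn from the VoiceOptions
// shape above; these EXTRA_* keys are documented as Android-only.
async function startDictation(): Promise<void> {
  await Voice.start('en-US', {
    EXTRA_LANGUAGE_MODEL: 'LANGUAGE_MODEL_FREE_FORM',
    EXTRA_MAX_RESULTS: 5,
    EXTRA_PARTIAL_RESULTS: true,
  });
}
```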
package/src/components/MicIcon.tsx
DELETED
@@ -1,72 +0,0 @@
-import React from 'react';
-import Svg, { Path, Rect } from 'react-native-svg';
-
-export interface MicIconProps {
-  size?: number;
-  color?: string;
-  strokeWidth?: number;
-}
-
-/**
- * Microphone Icon Component
- * Based on Lucide mic icon
- */
-export const MicIcon: React.FC<MicIconProps> = ({
-  size = 24,
-  color = 'currentColor',
-  strokeWidth = 2,
-}) => {
-  return (
-    <Svg
-      width={size}
-      height={size}
-      viewBox="0 0 24 24"
-      fill="none"
-      stroke={color}
-      strokeWidth={strokeWidth}
-      strokeLinecap="round"
-      strokeLinejoin="round"
-    >
-      <Path d="M12 19v3" />
-      <Path d="M19 10v2a7 7 0 0 1-14 0v-2" />
-      <Rect x="9" y="2" width="6" height="13" rx="3" />
-    </Svg>
-  );
-};
-
-export interface MicOffIconProps {
-  size?: number;
-  color?: string;
-  strokeWidth?: number;
-}
-
-/**
- * Microphone Off Icon Component
- * For recording/stop state
- */
-export const MicOffIcon: React.FC<MicOffIconProps> = ({
-  size = 24,
-  color = 'currentColor',
-  strokeWidth = 2,
-}) => {
-  return (
-    <Svg
-      width={size}
-      height={size}
-      viewBox="0 0 24 24"
-      fill="none"
-      stroke={color}
-      strokeWidth={strokeWidth}
-      strokeLinecap="round"
-      strokeLinejoin="round"
-    >
-      <Path d="M2 2l20 20" />
-      <Path d="M12 12a3 3 0 0 0 3-3V5a3 3 0 1 0-6 0v1" />
-      <Path d="M19 10v2a7 7 0 0 1-11.18 5.66" />
-      <Path d="M4.27 16.73A7 7 0 0 1 5 12v-2" />
-      <Path d="M12 17v5" />
-    </Svg>
-  );
-};
-
-export default MicIcon;
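Only the TypeScript source was dropped from the tarball; the compiled icons remain in dist/components (see MicIcon.js in the file list). A brief usage sketch, assuming MicIcon and MicOffIcon are re-exported from the package root and that react-native-svg is installed, as the removed source requires:

```tsx
import React from 'react';
import { MicIcon, MicOffIcon } from 'react-native-voice-ts';

// Swap between the two Lucide-style glyphs based on recording state,
// using the size/color/strokeWidth props declared above.
export function MicGlyph({ recording }: { recording: boolean }) {
  return recording ? (
    <MicOffIcon size={32} color="#d9534f" strokeWidth={2} />
  ) : (
    <MicIcon size={32} color="#333333" strokeWidth={2} />
  );
}
```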
package/src/components/VoiceMicrophone.tsx
DELETED
@@ -1,238 +0,0 @@
-import React, { useEffect, useState, useCallback } from 'react';
-import Voice from '../index';
-import type { SpeechErrorEvent, SpeechResultsEvent } from '../VoiceModuleTypes';
-
-export interface VoiceMicrophoneProps {
-  /**
-   * Callback fired when speech is recognized and converted to text
-   */
-  onSpeechResult?: (text: string) => void;
-
-  /**
-   * Callback fired when partial results are available (real-time)
-   */
-  onPartialResult?: (text: string) => void;
-
-  /**
-   * Callback fired when recording starts
-   */
-  onStart?: () => void;
-
-  /**
-   * Callback fired when recording stops
-   */
-  onStop?: () => void;
-
-  /**
-   * Callback fired when an error occurs
-   */
-  onError?: (error: string) => void;
-
-  /**
-   * Language locale for speech recognition
-   * @default 'en-US'
-   */
-  locale?: string;
-
-  /**
-   * Whether to automatically start recording on mount
-   * @default false
-   */
-  autoStart?: boolean;
-
-  /**
-   * Whether to enable partial results (real-time transcription)
-   * @default true
-   */
-  enablePartialResults?: boolean;
-
-  /**
-   * Custom render function for the component
-   * Receives isRecording state and control functions
-   */
-  children?: (props: {
-    isRecording: boolean;
-    recognizedText: string;
-    partialText: string;
-    start: () => Promise<void>;
-    stop: () => Promise<void>;
-    cancel: () => Promise<void>;
-    error: string | null;
-  }) => React.ReactNode;
-}
-
-/**
- * VoiceMicrophone Component
- *
- * A ready-to-use voice recognition component that handles microphone access,
- * speech recognition, and provides real-time text results.
- *
- * @example
- * ```tsx
- * // Simple usage with callback
- * <VoiceMicrophone
- *   onSpeechResult={(text) => setSearchQuery(text)}
- * />
- *
- * // Custom render with full control
- * <VoiceMicrophone locale="en-US">
- *   {({ isRecording, recognizedText, start, stop }) => (
- *     <View>
- *       <Text>{recognizedText}</Text>
- *       <Button
- *         onPress={isRecording ? stop : start}
- *         title={isRecording ? 'Stop' : 'Start'}
- *       />
- *     </View>
- *   )}
- * </VoiceMicrophone>
- * ```
- */
-const VoiceMicrophone: React.FC<VoiceMicrophoneProps> = ({
-  onSpeechResult,
-  onPartialResult,
-  onStart,
-  onStop,
-  onError,
-  locale = 'en-US',
-  autoStart = false,
-  enablePartialResults = true,
-  children,
-}) => {
-  const [isRecording, setIsRecording] = useState(false);
-  const [recognizedText, setRecognizedText] = useState('');
-  const [partialText, setPartialText] = useState('');
-  const [error, setError] = useState<string | null>(null);
-
-  useEffect(() => {
-    // Set up event listeners
-    Voice.onSpeechStart = () => {
-      setIsRecording(true);
-      setError(null);
-      onStart?.();
-    };
-
-    Voice.onSpeechEnd = () => {
-      setIsRecording(false);
-      onStop?.();
-    };
-
-    Voice.onSpeechError = (e: SpeechErrorEvent) => {
-      const errorMessage = e.error?.message || 'Unknown error';
-      setError(errorMessage);
-      setIsRecording(false);
-      onError?.(errorMessage);
-    };
-
-    Voice.onSpeechResults = (e: SpeechResultsEvent) => {
-      if (e.value && e.value.length > 0) {
-        const text = e.value[0];
-        setRecognizedText(text);
-        onSpeechResult?.(text);
-      }
-    };
-
-    if (enablePartialResults) {
-      Voice.onSpeechPartialResults = (e: SpeechResultsEvent) => {
-        if (e.value && e.value.length > 0) {
-          const text = e.value[0];
-          setPartialText(text);
-          onPartialResult?.(text);
-        }
-      };
-    }
-
-    // Cleanup
-    return () => {
-      Voice.destroy().then(Voice.removeAllListeners);
-    };
-  }, [
-    onSpeechResult,
-    onPartialResult,
-    onStart,
-    onStop,
-    onError,
-    enablePartialResults,
-  ]);
-
-  // Auto-start if enabled
-  useEffect(() => {
-    if (autoStart) {
-      start();
-    }
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [autoStart]);
-
-  const start = useCallback(async () => {
-    try {
-      setError(null);
-      setRecognizedText('');
-      setPartialText('');
-
-      // Check permission (Android only)
-      const hasPermission = await Voice.checkMicrophonePermission();
-      if (!hasPermission) {
-        const granted = await Voice.requestMicrophonePermission();
-        if (!granted) {
-          setError('Microphone permission denied');
-          return;
-        }
-      }
-
-      await Voice.start(locale, {
-        EXTRA_PARTIAL_RESULTS: enablePartialResults,
-      });
-    } catch (e) {
-      const errorMessage =
-        e instanceof Error ? e.message : 'Failed to start recording';
-      setError(errorMessage);
-      onError?.(errorMessage);
-    }
-  }, [locale, enablePartialResults, onError]);
-
-  const stop = useCallback(async () => {
-    try {
-      await Voice.stop();
-    } catch (e) {
-      const errorMessage =
-        e instanceof Error ? e.message : 'Failed to stop recording';
-      setError(errorMessage);
-      onError?.(errorMessage);
-    }
-  }, [onError]);
-
-  const cancel = useCallback(async () => {
-    try {
-      await Voice.cancel();
-      setRecognizedText('');
-      setPartialText('');
-    } catch (e) {
-      const errorMessage =
-        e instanceof Error ? e.message : 'Failed to cancel recording';
-      setError(errorMessage);
-      onError?.(errorMessage);
-    }
-  }, [onError]);
-
-  // If children render prop is provided, use it
-  if (children) {
-    return (
-      <>
-        {children({
-          isRecording,
-          recognizedText,
-          partialText,
-          start,
-          stop,
-          cancel,
-          error,
-        })}
-      </>
-    );
-  }
-
-  // Default: render nothing (headless component)
-  return null;
-};
-
-export default VoiceMicrophone;
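The component's own @example shows the render-prop form; a callback-only sketch of the headless form follows, assuming VoiceMicrophone is exported by name from the package root:

```tsx
import React, { useState } from 'react';
import { Text, View } from 'react-native';
import { VoiceMicrophone } from 'react-native-voice-ts';

// Headless usage: the component renders nothing itself and only reports
// text through the documented callbacks.
export function DictationBanner() {
  const [live, setLive] = useState('');
  const [finalText, setFinalText] = useState('');

  return (
    <View>
      <VoiceMicrophone
        locale="en-US"
        autoStart
        onPartialResult={setLive}
        onSpeechResult={setFinalText}
        onError={(message) => console.warn('voice error:', message)}
      />
      <Text>{finalText || live}</Text>
    </View>
  );
}
```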
package/src/components/index.ts
DELETED
package/src/hooks/index.ts
DELETED
package/src/hooks/useVoiceRecognition.ts
DELETED
@@ -1,217 +0,0 @@
-import { useEffect, useState, useCallback } from 'react';
-import Voice from '../index';
-import type { SpeechErrorEvent, SpeechResultsEvent } from '../VoiceModuleTypes';
-
-export interface UseVoiceRecognitionOptions {
-  /**
-   * Language locale for speech recognition
-   * @default 'en-US'
-   */
-  locale?: string;
-
-  /**
-   * Whether to enable partial results (real-time transcription)
-   * @default true
-   */
-  enablePartialResults?: boolean;
-
-  /**
-   * Callback fired when speech is recognized
-   */
-  onResult?: (text: string) => void;
-
-  /**
-   * Callback fired when an error occurs
-   */
-  onError?: (error: string) => void;
-}
-
-export interface UseVoiceRecognitionReturn {
-  /**
-   * Whether voice recognition is currently active
-   */
-  isRecording: boolean;
-
-  /**
-   * Final recognized text results
-   */
-  results: string[];
-
-  /**
-   * Partial results (real-time transcription)
-   */
-  partialResults: string[];
-
-  /**
-   * Error message if an error occurred
-   */
-  error: string | null;
-
-  /**
-   * Start voice recognition
-   */
-  start: () => Promise<void>;
-
-  /**
-   * Stop voice recognition and get final results
-   */
-  stop: () => Promise<void>;
-
-  /**
-   * Cancel voice recognition without getting results
-   */
-  cancel: () => Promise<void>;
-
-  /**
-   * Reset all state
-   */
-  reset: () => void;
-}
-
-/**
- * Custom hook for voice recognition
- *
- * Provides a simple interface for speech-to-text functionality with automatic
- * event listener setup and cleanup.
- *
- * @example
- * ```tsx
- * const { isRecording, results, start, stop } = useVoiceRecognition({
- *   locale: 'en-US',
- *   onResult: (text) => setSearchQuery(text),
- * });
- *
- * // In your component
- * <Button
- *   onPress={isRecording ? stop : start}
- *   title={isRecording ? 'Stop' : 'Start Recording'}
- * />
- * <Text>{results[0]}</Text>
- * ```
- */
-export const useVoiceRecognition = (
-  options: UseVoiceRecognitionOptions = {},
-): UseVoiceRecognitionReturn => {
-  const {
-    locale = 'en-US',
-    enablePartialResults = true,
-    onResult,
-    onError,
-  } = options;
-
-  const [isRecording, setIsRecording] = useState(false);
-  const [results, setResults] = useState<string[]>([]);
-  const [partialResults, setPartialResults] = useState<string[]>([]);
-  const [error, setError] = useState<string | null>(null);
-
-  useEffect(() => {
-    // Set up event listeners
-    Voice.onSpeechStart = () => {
-      setIsRecording(true);
-      setError(null);
-    };
-
-    Voice.onSpeechEnd = () => {
-      setIsRecording(false);
-    };
-
-    Voice.onSpeechError = (e: SpeechErrorEvent) => {
-      const errorMessage = e.error?.message || 'Unknown error';
-      setError(errorMessage);
-      setIsRecording(false);
-      onError?.(errorMessage);
-    };
-
-    Voice.onSpeechResults = (e: SpeechResultsEvent) => {
-      if (e.value && e.value.length > 0) {
-        setResults(e.value);
-        const firstResult = e.value[0];
-        if (firstResult) {
-          onResult?.(firstResult);
-        }
-      }
-    };
-
-    if (enablePartialResults) {
-      Voice.onSpeechPartialResults = (e: SpeechResultsEvent) => {
-        if (e.value && e.value.length > 0) {
-          setPartialResults(e.value);
-        }
-      };
-    }
-
-    // Cleanup
-    return () => {
-      Voice.destroy().then(Voice.removeAllListeners);
-    };
-  }, [enablePartialResults, onResult, onError]);
-
-  const start = useCallback(async () => {
-    try {
-      setError(null);
-      setResults([]);
-      setPartialResults([]);
-
-      // Check permission (Android only)
-      const hasPermission = await Voice.checkMicrophonePermission();
-      if (!hasPermission) {
-        const granted = await Voice.requestMicrophonePermission();
-        if (!granted) {
-          setError('Microphone permission denied');
-          return;
-        }
-      }
-
-      await Voice.start(locale, {
-        EXTRA_PARTIAL_RESULTS: enablePartialResults,
-      });
-    } catch (e) {
-      const errorMessage =
-        e instanceof Error ? e.message : 'Failed to start recording';
-      setError(errorMessage);
-      onError?.(errorMessage);
-    }
-  }, [locale, enablePartialResults, onError]);
-
-  const stop = useCallback(async () => {
-    try {
-      await Voice.stop();
-    } catch (e) {
-      const errorMessage =
-        e instanceof Error ? e.message : 'Failed to stop recording';
-      setError(errorMessage);
-      onError?.(errorMessage);
-    }
-  }, [onError]);
-
-  const cancel = useCallback(async () => {
-    try {
-      await Voice.cancel();
-      setResults([]);
-      setPartialResults([]);
-    } catch (e) {
-      const errorMessage =
-        e instanceof Error ? e.message : 'Failed to cancel recording';
-      setError(errorMessage);
-      onError?.(errorMessage);
-    }
-  }, [onError]);
-
-  const reset = useCallback(() => {
-    setResults([]);
-    setPartialResults([]);
-    setError(null);
-    setIsRecording(false);
-  }, []);
-
-  return {
-    isRecording,
-    results,
-    partialResults,
-    error,
-    start,
-    stop,
-    cancel,
-    reset,
-  };
-};
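The hook's @example covers start/stop; the sketch below also exercises partialResults and reset from the declared return type, assuming useVoiceRecognition is re-exported from the package root (the removed hooks/index.ts suggests it is):

```tsx
import React from 'react';
import { Button, Text, View } from 'react-native';
import { useVoiceRecognition } from 'react-native-voice-ts';

// Show the live (partial) transcript while recording and the final result
// afterwards; reset() clears all hook state between takes.
export function SearchByVoice() {
  const { isRecording, results, partialResults, error, start, stop, reset } =
    useVoiceRecognition({ locale: 'en-US' });

  return (
    <View>
      <Text>{isRecording ? partialResults[0] : results[0]}</Text>
      {error ? <Text>Error: {error}</Text> : null}
      <Button
        title={isRecording ? 'Stop' : 'Speak'}
        onPress={isRecording ? stop : start}
      />
      <Button title="Clear" onPress={reset} />
    </View>
  );
}
```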
package/src/images/mic.svg
DELETED
@@ -1,16 +0,0 @@
-<svg
-  xmlns="http://www.w3.org/2000/svg"
-  width="24"
-  height="24"
-  viewBox="0 0 24 24"
-  fill="none"
-  stroke="currentColor"
-  stroke-width="2"
-  stroke-linecap="round"
-  stroke-linejoin="round"
-  class="lucide lucide-mic-icon lucide-mic"
->
-  <path d="M12 19v3" />
-  <path d="M19 10v2a7 7 0 0 1-14 0v-2" />
-  <rect x="9" y="2" width="6" height="13" rx="3" />
-</svg>;