react-native-srschat 0.1.62 → 0.1.63
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/commonjs/components/input.js +3 -2
- package/lib/commonjs/components/input.js.map +1 -1
- package/lib/commonjs/components/voice.js +3 -2
- package/lib/commonjs/components/voice.js.map +1 -1
- package/lib/commonjs/components/welcomeInput.js +3 -2
- package/lib/commonjs/components/welcomeInput.js.map +1 -1
- package/lib/commonjs/contexts/AppContext.js +4 -1
- package/lib/commonjs/contexts/AppContext.js.map +1 -1
- package/lib/commonjs/utils/audioRecorder.js +168 -164
- package/lib/commonjs/utils/audioRecorder.js.map +1 -1
- package/lib/module/components/input.js +3 -2
- package/lib/module/components/input.js.map +1 -1
- package/lib/module/components/voice.js +3 -2
- package/lib/module/components/voice.js.map +1 -1
- package/lib/module/components/welcomeInput.js +3 -2
- package/lib/module/components/welcomeInput.js.map +1 -1
- package/lib/module/contexts/AppContext.js +4 -1
- package/lib/module/contexts/AppContext.js.map +1 -1
- package/lib/module/utils/audioRecorder.js +168 -164
- package/lib/module/utils/audioRecorder.js.map +1 -1
- package/lib/typescript/components/voice.d.ts.map +1 -1
- package/lib/typescript/contexts/AppContext.d.ts.map +1 -1
- package/lib/typescript/utils/audioRecorder.d.ts.map +1 -1
- package/package.json +1 -1
- package/src/components/input.js +2 -2
- package/src/components/voice.js +1 -2
- package/src/components/welcomeInput.js +2 -2
- package/src/contexts/AppContext.js +2 -1
- package/src/utils/audioRecorder.js +184 -167

@@ -16,7 +16,7 @@ import { VoiceButton } from './voice';
 
 export const WelcomeInput = ({ onProductCardClick, onAddToCartClick }) => {
 
-  const { data, handleSend, input, setInput, showModal, theme } = useContext(AppContext);
+  const { data, handleSend, input, setInput, showModal, theme, isListening } = useContext(AppContext);
   const inputRef = useRef(null);
 
   const handleKeyPress = ({ nativeEvent }) => {
@@ -40,7 +40,7 @@ export const WelcomeInput = ({ onProductCardClick, onAddToCartClick }) => {
         style={styles.input}
         value={input}
         onChangeText={setInput}
-        placeholder="Ask a question..."
+        placeholder={isListening && !input ? "Listening..." : "Ask a question..."}
         placeholderTextColor="#999"
         multiline={false}
         returnKeyType="send"
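Note: per the file list above, input.js carries a matching +2/-2 change, presumably the same placeholder swap. A minimal sketch of the pattern these hunks introduce — reading the new `isListening` flag from AppContext to drive the placeholder (the component name and import path here are illustrative, not package API):

```js
import React, { useContext } from 'react';
import { TextInput } from 'react-native';
import { AppContext } from '../contexts/AppContext';

// Hypothetical consumer: while speech recognition is active and the field is
// empty, show "Listening..." instead of the usual prompt.
const ExampleInput = () => {
  const { input, setInput, isListening } = useContext(AppContext);
  return (
    <TextInput
      value={input}
      onChangeText={setInput}
      placeholder={isListening && !input ? 'Listening...' : 'Ask a question...'}
      placeholderTextColor="#999"
    />
  );
};
```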
@@ -48,6 +48,7 @@ export const AppProvider = ({ data, onProductCardClick, onAddToCartClick, uiConf
   const [lastUserMessage, setLastUserMessage] = useState("");
   const [lastMessageId, setLastMessageId] = useState("");
   const [sessionId, setSessionId] = useState(null);
+  const [isListening, setIsListening] = useState(false);
 
   // Message UI
   const [typingIndicator, setTypingIndicator] = useState(false);
@@ -374,7 +375,7 @@ export const AppProvider = ({ data, onProductCardClick, onAddToCartClick, uiConf
       startStreaming, setStartStreaming, maintenance, setMaintenance, feedback, setFeedback, handleFeedback, feedbackOpen, setFeedbackOpen,
       writeFeedback, setWriteFeedback, writeAnswer, setWriteAnswer, BASE_URL, lastMessageId, setLastMessageId,
       onProductCardClick, onAddToCartClick: handleAddToCartWithMessage, data, sessionId, setSessionId, handleWrittenFeedback, switchFeedbackOpen, confirmDisclaimer,
-      formatChatHistory, uiConfig, handleVoiceSend, TRACK_CLICK_URL, ADD_TO_CART_URL
+      formatChatHistory, uiConfig, handleVoiceSend, TRACK_CLICK_URL, ADD_TO_CART_URL, isListening, setIsListening
     }}
     >
       {children}
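Note: with `isListening` and `setIsListening` now exposed on the provider value, any consumer can mirror the recorder lifecycle into context state. A hedged sketch of a mic toggle (the component and import paths are hypothetical; `startRecording`'s boolean return comes from the audioRecorder hunks below):

```js
import React, { useContext } from 'react';
import { Text, TouchableOpacity } from 'react-native';
import { AppContext } from '../contexts/AppContext';
import { startRecording, stopRecording } from '../utils/audioRecorder';

// Hypothetical mic button: flips the shared flag around the recorder calls.
const MicToggleExample = () => {
  const { isListening, setIsListening } = useContext(AppContext);

  const onPress = async () => {
    if (isListening) {
      await stopRecording();
      setIsListening(false);
    } else {
      const started = await startRecording(); // resolves to true/false
      setIsListening(started);
    }
  };

  return (
    <TouchableOpacity onPress={onPress}>
      <Text>{isListening ? 'Stop' : 'Speak'}</Text>
    </TouchableOpacity>
  );
};
```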
@@ -7,13 +7,9 @@ import { check, PERMISSIONS, request, RESULTS } from 'react-native-permissions';
 let resultCallback = null;
 let partialResultCallback = null;
 let silenceTimer = null;
+let isCurrentlyRecording = false;
 let finalResult = '';
-const SILENCE_DURATION = 1500;
-
-const State = { IDLE: 'IDLE', LISTENING: 'LISTENING', FINALIZING: 'FINALIZING' };
-let state = State.IDLE;
-
-let listenersBound = false;
+const SILENCE_DURATION = 1500; // 1.5 seconds of silence before stopping
 
 // Add this constant for AsyncStorage key
 const PERMISSION_STORAGE_KEY = '@voice_permission_status';
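Note: the hunk above replaces the old three-state machine (IDLE/LISTENING/FINALIZING plus a `listenersBound` guard) with a single module-level boolean. A condensed sketch of the guard logic the rest of the diff builds on (the wrapper function is illustrative, not package API):

```js
// Condensed from audioRecorder.js after this change: one boolean gates the
// entry points instead of a state machine.
let isCurrentlyRecording = false;

async function guardedFinalize(finalize) {
  if (!isCurrentlyRecording) return; // ignore late or duplicate events
  isCurrentlyRecording = false;      // flip first to prevent re-entry
  await finalize();
}
```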
@@ -34,9 +30,7 @@ export async function initVoice(onResult, onPartialResult = null) {
     resultCallback = onResult;
     partialResultCallback = onPartialResult; // Store partial callback
     finalResult = '';
-
-    if (listenersBound) return true;
-
+
     // Check if Voice module is available
     if (!Voice) {
       console.error('Voice module is not available');
@@ -50,109 +44,99 @@ export async function initVoice(onResult, onPartialResult = null) {
       return false;
     }
 
+    // Remove any existing listeners
     Voice.removeAllListeners();
 
     // Set up all event listeners
-    Voice.onSpeechStart = () => {
-      console.log('
-
+    Voice.onSpeechStart = (e) => {
+      console.log('onSpeechStart: ', e);
+      isCurrentlyRecording = true;
       finalResult = '';
-
+
+      if (silenceTimer) {
+        clearTimeout(silenceTimer);
+        silenceTimer = null;
+      }
     };
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    Voice.onSpeechRecognized = (e) => {
+      console.log('onSpeechRecognized: ', e);
+      if (e.isFinal) {
+        console.log('Speech recognition final');
+        handleFinalResult();
+      }
+    };
+
+    Voice.onSpeechEnd = async (e) => {
+      console.log('onSpeechEnd: ', e);
+
+      if (silenceTimer) {
+        clearTimeout(silenceTimer);
+        silenceTimer = null;
+      }
+
+      // Only handle final result if we're still recording
+      if (isCurrentlyRecording) {
+        await handleFinalResult();
       }
     };
 
-    Voice.onSpeechError = (e) => {
-      console.log('
-
+    Voice.onSpeechError = async (e) => {
+      console.log('onSpeechError: ', e);
+
+      if (silenceTimer) {
+        clearTimeout(silenceTimer);
+        silenceTimer = null;
+      }
+
       const code = e.error?.code?.toString();
       const msg = e.error?.message || '';
 
-      // Handle
+      // Handle Android-specific errors
      if (Platform.OS === 'android' && (code === '7' || code === '5')) {
-        if (finalResult && resultCallback)
-
+        if (finalResult && resultCallback) {
+          resultCallback(finalResult, null);
+        }
       } else if (!msg.includes('No speech detected') && resultCallback) {
         resultCallback(null, msg);
-      } else if (resultCallback) {
-        resultCallback(null, null);
       }
 
-
-      console.log('[onSpeechError] Scheduling IDLE reset');
-      if (Platform.OS === 'android') {
-        setTimeout(() => {
-          console.log('[onSpeechError] Android timeout - setting state to IDLE');
-          state = State.IDLE;
-        }, 800); // Increased delay to match onSpeechEnd
-      } else {
-        console.log('[onSpeechError] iOS - setting state to IDLE immediately');
-        state = State.IDLE;
-      }
+      await cleanupVoiceSession();
     };
 
     Voice.onSpeechResults = (e) => {
-      console.log('
-      clearSilenceTimer();
+      console.log('onSpeechResults: ', e);
       if (e.value && e.value.length > 0) {
         finalResult = e.value[0];
-
-
-        // Only call callback if we haven't already (avoid double-calling)
-        if (state === State.LISTENING && resultCallback) {
-          console.log('[onSpeechResults] Calling callback with results');
-          resultCallback(finalResult, null);
-        } else {
-          console.log('[onSpeechResults] Not calling callback - state:', state);
-        }
-
-        // On Android, we must explicitly stop to avoid session corruption
-        if (Platform.OS === 'android') {
-          console.log('[onSpeechResults] Android: Explicitly calling stopRecording()');
-          stopRecording();
-        }
-
-        // Results end the session, reset to IDLE with delay
-        console.log('[onSpeechResults] Scheduling IDLE reset');
-        if (Platform.OS === 'android') {
-          setTimeout(() => {
-            console.log('[onSpeechResults] Android timeout - setting state to IDLE');
-            state = State.IDLE;
-          }, 800); // Increased delay
-        } else {
-          console.log('[onSpeechResults] iOS - setting state to IDLE immediately');
-          state = State.IDLE;
+        handleSilenceDetection();
       }
     };
 
     Voice.onSpeechPartialResults = (e) => {
+      console.log('onSpeechPartialResults: ', e);
+
+      if (silenceTimer) {
+        clearTimeout(silenceTimer);
+      }
+
       if (e.value && e.value.length > 0) {
         finalResult = e.value[0];
-
+
+        // Call partial callback for live transcription
+        if (partialResultCallback) {
+          partialResultCallback(finalResult);
+        }
+
         handleSilenceDetection();
       }
     };
 
-    if (Platform.OS === 'android')
+    if (Platform.OS === 'android') {
+      Voice.onSpeechVolumeChanged = (e) => {
+        console.log('onSpeechVolumeChanged: ', e);
+      };
+    }
 
-    listenersBound = true;
     return true;
   } catch (error) {
     console.error('Error initializing Voice:', error);
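Note: every result event above funnels into handleSilenceDetection, and the hunk below shows the timer it arms: each fresh (partial) result clears and re-arms a SILENCE_DURATION timeout, so the utterance is finalized only after 1.5 s with no new results. The same debounce in isolation (a sketch; the constant and timer names mirror the module, the wrapper function is illustrative):

```js
// Debounce sketch: re-arm a timer on every recognition event; when no event
// arrives for SILENCE_DURATION ms, treat the utterance as finished.
let silenceTimer = null;
const SILENCE_DURATION = 1500;

function onRecognitionEvent(finalize) {
  if (silenceTimer) {
    clearTimeout(silenceTimer);
  }
  silenceTimer = setTimeout(() => {
    silenceTimer = null;
    finalize(); // stands in for handleFinalResult() in audioRecorder.js
  }, SILENCE_DURATION);
}
```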
@@ -166,144 +150,163 @@ const handleSilenceDetection = () => {
   }
 
   silenceTimer = setTimeout(async () => {
-    if (
+    if (isCurrentlyRecording) {
       await handleFinalResult();
     }
   }, SILENCE_DURATION);
 };
 
 const handleFinalResult = async () => {
-
-  if (state !== State.LISTENING) {
-    console.log('[handleFinalResult] State not LISTENING, returning');
-    return;
-  }
+  if (!isCurrentlyRecording) return;
 
-
-  console.log('[handleFinalResult] Setting state to FINALIZING');
-  state = State.FINALIZING;
+  console.log('handleFinalResult called with:', finalResult);
 
-  // Call the callback with results
   if (finalResult && resultCallback) {
-    console.log('[handleFinalResult] Calling callback with result:', finalResult);
     resultCallback(finalResult, null);
   }
 
-  //
-  console.log('[handleFinalResult] Calling stopRecording');
+  // Stop recording first
   await stopRecording();
 };
 
-const cleanupVoiceSession = () => {
-  console.log('
-
-  clearSilenceTimer();
+const cleanupVoiceSession = async () => {
+  console.log('cleanupVoiceSession called');
+  isCurrentlyRecording = false;
 
-  // Add delay before allowing next session on Android
-  if (Platform.OS === 'android') {
-    setTimeout(() => {
-      console.log('[cleanupVoiceSession] Android timeout - setting state to IDLE');
-      state = State.IDLE;
-    }, 800);
-  } else {
-    console.log('[cleanupVoiceSession] iOS - setting state to IDLE immediately');
-    state = State.IDLE;
-  }
-};
-
-const clearSilenceTimer = () => {
   if (silenceTimer) {
     clearTimeout(silenceTimer);
     silenceTimer = null;
   }
-};
 
-export async function startRecording() {
   try {
-
+    // Check if Voice module is available
+    if (!Voice) {
+      console.log('Voice module not available during cleanup');
+      return;
+    }
 
-    //
-
+    // Check if still recognizing
+    const isRecognizing = await Voice.isRecognizing();
+    console.log('Voice.isRecognizing() in cleanup:', isRecognizing);
+
+    if (isRecognizing) {
       try {
-
-
-
+        // For iOS, use cancel for immediate termination
+        if (Platform.OS === 'ios') {
+          await Voice.cancel();
+          console.log('Voice.cancel() completed for iOS');
+        } else {
+          await Voice.stop();
+          console.log('Voice.stop() completed for Android');
+        }
+        await new Promise(resolve => setTimeout(resolve, 100));
       } catch (e) {
-      console.log('
+        console.log('Error stopping/canceling in cleanup:', e);
       }
     }
 
-
-
-
+    // Destroy the instance
+    try {
+      await Voice.destroy();
+      console.log('Voice.destroy() completed');
+      await new Promise(resolve => setTimeout(resolve, 200));
+    } catch (e) {
+      console.log('Error destroying in cleanup:', e);
     }
-
-
+
+  } catch (error) {
+    console.error('Error in cleanupVoiceSession:', error);
+  }
+
+  finalResult = '';
+};
+
+export async function startRecording() {
+  try {
+    console.log('startRecording called');
+
+    // Check if Voice module is available
+    if (!Voice) {
+      console.error('Voice module is not available');
       return false;
     }
+
+    // Ensure cleanup of any existing session
+    await cleanupVoiceSession();
+
+    // Small delay to ensure cleanup is complete
+    await new Promise(resolve => setTimeout(resolve, 200));
 
     const hasPermission = await requestAudioPermission();
     if (!hasPermission) {
       console.error('No permission to record audio');
       return false;
     }
-
-    const recognizing = await Voice.isRecognizing();
-    console.log('[startRecording] Voice.isRecognizing():', recognizing);
-    if (recognizing) {
-      console.log('[startRecording] Already recognizing, canceling first');
-      await Voice.cancel();
-      // Wait longer for cancel to take effect
-      await new Promise(r => setTimeout(r, 500));
-
-      // Double-check if still recognizing after cancel
-      const stillRecognizing = await Voice.isRecognizing();
-      console.log('[startRecording] After cancel, still recognizing:', stillRecognizing);
-      if (stillRecognizing) {
-        console.log('[startRecording] Still recognizing after cancel, stopping');
-        try {
-          await Voice.stop();
-          await new Promise(r => setTimeout(r, 300));
-        } catch (e) {
-          console.log('[startRecording] Error stopping:', e);
-        }
-      }
-    }
 
-
+    // Re-initialize listeners each time for iOS stability
+    await initVoice(resultCallback, partialResultCallback);
+
+    // Start recognition
     await Voice.start('en-US');
-    console.log('
-
+    console.log('Voice.start() completed');
+    isCurrentlyRecording = true;
     return true;
   } catch (error) {
     console.error('Error starting voice recognition:', error);
-    cleanupVoiceSession();
+    await cleanupVoiceSession();
     return false;
   }
 }
 
 export async function stopRecording() {
   try {
-    console.log('
-
-    if (
-      console.log('
+    console.log('stopRecording called');
+
+    if (!isCurrentlyRecording || !Voice) {
+      console.log('Not recording or Voice not available');
       return;
     }
-
-    //
-
-
-
+
+    // Set this first to prevent race conditions
+    isCurrentlyRecording = false;
+
+    if (silenceTimer) {
+      clearTimeout(silenceTimer);
+      silenceTimer = null;
     }
-
-
-
-
-
+
+    // Platform-specific stop
+    try {
+      if (Platform.OS === 'ios') {
+        // iOS: Use cancel for immediate termination
+        await Voice.cancel();
+        console.log('Voice.cancel() completed for iOS');
+      } else {
+        // Android: Use stop
+        await Voice.stop();
+        console.log('Voice.stop() completed for Android');
+      }
+
+      // Small delay
+      await new Promise(resolve => setTimeout(resolve, 100));
+    } catch (error) {
+      console.log('Error stopping/canceling Voice:', error);
+    }
+
+    // Then destroy
+    try {
+      await Voice.destroy();
+      console.log('Voice.destroy() completed');
+      await new Promise(resolve => setTimeout(resolve, 200));
+    } catch (error) {
+      console.log('Error destroying Voice:', error);
+    }
+
+    // Final cleanup
+    finalResult = '';
   } catch (error) {
     console.error('Error in stopRecording:', error);
-    cleanupVoiceSession();
+    await cleanupVoiceSession();
   }
 }
 
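Note: the reworked lifecycle above is startRecording → cleanupVoiceSession (destroy any stale session) → initVoice (re-bind listeners) → Voice.start; stopRecording then cancels (iOS) or stops (Android) before destroying the instance. A hedged end-to-end usage sketch (the wrapper function and callback bodies are illustrative, not package API):

```js
import { initVoice, startRecording, stopRecording } from './utils/audioRecorder';

// Illustrative wiring: partials give live transcription, the final result sends.
async function beginDictation(setInput, handleVoiceSend) {
  const ready = await initVoice(
    (result, error) => {          // final-result callback
      if (error) console.warn('Voice error:', error);
      else if (result) handleVoiceSend(result);
    },
    partial => setInput(partial)  // live-transcription callback
  );
  if (!ready) return false;
  return startRecording();        // true once Voice.start('en-US') succeeds
}

// Later, e.g. on a stop tap: await stopRecording();
```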
@@ -311,10 +314,10 @@ export async function cancelRecording() {
   try {
     if (!Voice) return;
     await Voice.cancel();
-    cleanupVoiceSession();
+    await cleanupVoiceSession();
   } catch (error) {
     console.error('Error canceling voice recognition:', error);
-    cleanupVoiceSession();
+    await cleanupVoiceSession();
   }
 }
 
@@ -407,5 +410,19 @@ export function resetStoredPermission() {
 }
 
 export function cleanup() {
-
+  if (!Voice) {
+    console.log('Voice module not available during cleanup');
+    return;
+  }
+
+  Voice.destroy().then(() => {
+    Voice.removeAllListeners();
+    cleanupVoiceSession();
+  }).catch(error => {
+    console.error('Error in cleanup:', error);
+    // Try one more time
+    if (Voice) {
+      Voice.destroy().catch(e => console.error('Final cleanup attempt failed:', e));
+    }
+  });
 }