react-native-ai-hooks 0.1.0 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -0
- package/package.json +1 -1
- package/src/hooks/useAIVoice.ts +219 -0
- package/src/index.ts +2 -1
package/README.md
CHANGED
|
@@ -1,3 +1,8 @@
|
|
|
1
|
+
[](https://npmjs.com/package/react-native-ai-hooks)
|
|
2
|
+
[](https://npmjs.com/package/react-native-ai-hooks)
|
|
3
|
+
[](https://github.com/nikapkh/react-native-ai-hooks)
|
|
4
|
+
[](https://opensource.org/licenses/MIT)
|
|
5
|
+
|
|
1
6
|
# react-native-ai-hooks
|
|
2
7
|
|
|
3
8
|
AI hooks for React Native — add Claude, OpenAI & Gemini to your app in minutes.
|
package/src/hooks/useAIVoice.ts
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
import { useCallback, useEffect, useRef, useState } from 'react';
|
|
2
|
+
import { PermissionsAndroid, Platform } from 'react-native';
|
|
3
|
+
import Voice from '@react-native-voice/voice';
|
|
4
|
+
|
|
5
|
+
/** Configuration accepted by {@link useAIVoice}. */
interface UseAIVoiceOptions {
  /** Anthropic API key sent as the `x-api-key` header. */
  apiKey: string;
  /** Claude model id; defaults to 'claude-sonnet-4-20250514'. */
  model?: string;
  /** `max_tokens` for the Claude request; defaults to 1024. */
  maxTokens?: number;
  /** Optional system prompt forwarded verbatim to the API. */
  system?: string;
  /** Speech-recognition locale (e.g. 'en-US'); defaults to 'en-US'. */
  language?: string;
  /** Unless explicitly false, stopRecording() also sends the transcription. */
  autoSendOnStop?: boolean;
}

/** Value returned by {@link useAIVoice}. */
interface UseAIVoiceReturn {
  /** Latest speech-to-text result (best hypothesis). */
  transcription: string;
  /** Latest Claude text reply. */
  response: string;
  /** True while the microphone session is active. */
  isRecording: boolean;
  /** True while a Claude request is in flight. */
  isLoading: boolean;
  /** Last recognition/API error message, or null. */
  error: string | null;
  /** Requests mic permission (Android) and starts recognition. */
  startRecording: () => Promise<void>;
  /** Stops recognition; may auto-send (see autoSendOnStop). */
  stopRecording: () => Promise<void>;
  /**
   * Sends overrideText (or the current transcription) to Claude.
   * Resolves with the reply text, or null on failure (error state is set).
   */
  sendTranscription: (overrideText?: string) => Promise<string | null>;
  /** Clears transcription, response and error state. */
  clearVoiceState: () => void;
}

/** One entry of the Claude response `content` array (only text blocks are read). */
interface ClaudeTextBlock {
  type?: string;
  text?: string;
}

/** Minimal shape of the Anthropic /v1/messages response body that this hook reads. */
interface ClaudeApiResult {
  content?: ClaudeTextBlock[];
  error?: {
    message?: string;
  };
}

/** Subset of the @react-native-voice/voice results event this hook reads. */
interface SpeechResultsEvent {
  value?: string[];
}

/** Subset of the @react-native-voice/voice error event this hook reads. */
interface SpeechErrorEvent {
  error?: {
    message?: string;
  };
}
|
|
47
|
+
|
|
48
|
+
function getClaudeTextContent(data: unknown): string {
|
|
49
|
+
const content = (data as ClaudeApiResult)?.content;
|
|
50
|
+
if (!Array.isArray(content)) {
|
|
51
|
+
return '';
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
return content
|
|
55
|
+
.filter(item => item?.type === 'text' && typeof item.text === 'string')
|
|
56
|
+
.map(item => item.text as string)
|
|
57
|
+
.join('\n')
|
|
58
|
+
.trim();
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
export function useAIVoice(options: UseAIVoiceOptions): UseAIVoiceReturn {
|
|
62
|
+
const [transcription, setTranscription] = useState('');
|
|
63
|
+
const [response, setResponse] = useState('');
|
|
64
|
+
const [isRecording, setIsRecording] = useState(false);
|
|
65
|
+
const [isLoading, setIsLoading] = useState(false);
|
|
66
|
+
const [error, setError] = useState<string | null>(null);
|
|
67
|
+
|
|
68
|
+
const transcriptionRef = useRef('');
|
|
69
|
+
const isMountedRef = useRef(true);
|
|
70
|
+
|
|
71
|
+
const requestMicPermission = useCallback(async () => {
|
|
72
|
+
if (Platform.OS !== 'android') {
|
|
73
|
+
return true;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
const permission = await PermissionsAndroid.request(
|
|
77
|
+
PermissionsAndroid.PERMISSIONS.RECORD_AUDIO,
|
|
78
|
+
);
|
|
79
|
+
return permission === PermissionsAndroid.RESULTS.GRANTED;
|
|
80
|
+
}, []);
|
|
81
|
+
|
|
82
|
+
const sendTranscription = useCallback(
|
|
83
|
+
async (overrideText?: string) => {
|
|
84
|
+
const prompt = (overrideText ?? transcriptionRef.current).trim();
|
|
85
|
+
|
|
86
|
+
if (!prompt) {
|
|
87
|
+
setError('No transcription available to send.');
|
|
88
|
+
return null;
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
if (!options.apiKey) {
|
|
92
|
+
setError('Missing Claude API key.');
|
|
93
|
+
return null;
|
|
94
|
+
}
|
|
95
|
+
|
|
96
|
+
setIsLoading(true);
|
|
97
|
+
setError(null);
|
|
98
|
+
|
|
99
|
+
try {
|
|
100
|
+
const apiResponse = await fetch('https://api.anthropic.com/v1/messages', {
|
|
101
|
+
method: 'POST',
|
|
102
|
+
headers: {
|
|
103
|
+
'Content-Type': 'application/json',
|
|
104
|
+
'x-api-key': options.apiKey,
|
|
105
|
+
'anthropic-version': '2023-06-01',
|
|
106
|
+
},
|
|
107
|
+
body: JSON.stringify({
|
|
108
|
+
model: options.model || 'claude-sonnet-4-20250514',
|
|
109
|
+
max_tokens: options.maxTokens ?? 1024,
|
|
110
|
+
system: options.system,
|
|
111
|
+
messages: [{ role: 'user', content: prompt }],
|
|
112
|
+
}),
|
|
113
|
+
});
|
|
114
|
+
|
|
115
|
+
const data = (await apiResponse.json()) as ClaudeApiResult;
|
|
116
|
+
if (!apiResponse.ok) {
|
|
117
|
+
throw new Error(data?.error?.message || `Claude API error: ${apiResponse.status}`);
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
const text = getClaudeTextContent(data);
|
|
121
|
+
if (!text) {
|
|
122
|
+
throw new Error('No text response returned by Claude API.');
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
setResponse(text);
|
|
126
|
+
return text;
|
|
127
|
+
} catch (err) {
|
|
128
|
+
const message = (err as Error).message || 'Failed to send transcription';
|
|
129
|
+
setError(message);
|
|
130
|
+
return null;
|
|
131
|
+
} finally {
|
|
132
|
+
if (isMountedRef.current) {
|
|
133
|
+
setIsLoading(false);
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
},
|
|
137
|
+
[options.apiKey, options.maxTokens, options.model, options.system],
|
|
138
|
+
);
|
|
139
|
+
|
|
140
|
+
const startRecording = useCallback(async () => {
|
|
141
|
+
setError(null);
|
|
142
|
+
|
|
143
|
+
const permissionGranted = await requestMicPermission();
|
|
144
|
+
if (!permissionGranted) {
|
|
145
|
+
setError('Microphone permission not granted.');
|
|
146
|
+
return;
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
try {
|
|
150
|
+
transcriptionRef.current = '';
|
|
151
|
+
setTranscription('');
|
|
152
|
+
await Voice.start(options.language || 'en-US');
|
|
153
|
+
setIsRecording(true);
|
|
154
|
+
} catch (err) {
|
|
155
|
+
const message = (err as Error).message || 'Failed to start voice recording';
|
|
156
|
+
setError(message);
|
|
157
|
+
setIsRecording(false);
|
|
158
|
+
}
|
|
159
|
+
}, [options.language, requestMicPermission]);
|
|
160
|
+
|
|
161
|
+
const stopRecording = useCallback(async () => {
|
|
162
|
+
try {
|
|
163
|
+
await Voice.stop();
|
|
164
|
+
} catch {
|
|
165
|
+
// Ignore stop failures; state is reset below.
|
|
166
|
+
} finally {
|
|
167
|
+
setIsRecording(false);
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
if (options.autoSendOnStop !== false) {
|
|
171
|
+
await sendTranscription();
|
|
172
|
+
}
|
|
173
|
+
}, [options.autoSendOnStop, sendTranscription]);
|
|
174
|
+
|
|
175
|
+
const clearVoiceState = useCallback(() => {
|
|
176
|
+
transcriptionRef.current = '';
|
|
177
|
+
setTranscription('');
|
|
178
|
+
setResponse('');
|
|
179
|
+
setError(null);
|
|
180
|
+
}, []);
|
|
181
|
+
|
|
182
|
+
useEffect(() => {
|
|
183
|
+
isMountedRef.current = true;
|
|
184
|
+
|
|
185
|
+
Voice.onSpeechResults = (event: SpeechResultsEvent) => {
|
|
186
|
+
const latestText = event?.value?.[0]?.trim() || '';
|
|
187
|
+
transcriptionRef.current = latestText;
|
|
188
|
+
setTranscription(latestText);
|
|
189
|
+
};
|
|
190
|
+
|
|
191
|
+
Voice.onSpeechError = (event: SpeechErrorEvent) => {
|
|
192
|
+
const message = event?.error?.message || 'Speech recognition failed.';
|
|
193
|
+
setError(message);
|
|
194
|
+
setIsRecording(false);
|
|
195
|
+
};
|
|
196
|
+
|
|
197
|
+
Voice.onSpeechEnd = () => {
|
|
198
|
+
setIsRecording(false);
|
|
199
|
+
};
|
|
200
|
+
|
|
201
|
+
return () => {
|
|
202
|
+
isMountedRef.current = false;
|
|
203
|
+
Voice.destroy().catch(() => undefined);
|
|
204
|
+
Voice.removeAllListeners();
|
|
205
|
+
};
|
|
206
|
+
}, []);
|
|
207
|
+
|
|
208
|
+
return {
|
|
209
|
+
transcription,
|
|
210
|
+
response,
|
|
211
|
+
isRecording,
|
|
212
|
+
isLoading,
|
|
213
|
+
error,
|
|
214
|
+
startRecording,
|
|
215
|
+
stopRecording,
|
|
216
|
+
sendTranscription,
|
|
217
|
+
clearVoiceState,
|
|
218
|
+
};
|
|
219
|
+
}
|
package/src/index.ts
CHANGED
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
// Public API surface: one named export per hook (no default export).
export { useAIChat } from './hooks/useAIChat';
export { useAIStream } from './hooks/useAIStream';
export { useImageAnalysis } from './hooks/useImageAnalysis';
export { useAIForm } from './hooks/useAIForm';
export { useAIVoice } from './hooks/useAIVoice';
|