react-native-ai-hooks 0.3.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/.github/workflows/ci.yml +34 -0
  2. package/CONTRIBUTING.md +122 -0
  3. package/README.md +73 -20
  4. package/docs/ARCHITECTURE.md +301 -0
  5. package/docs/ARCHITECTURE_GUIDE.md +467 -0
  6. package/docs/IMPLEMENTATION_COMPLETE.md +349 -0
  7. package/docs/README.md +17 -0
  8. package/docs/TECHNICAL_SPECIFICATION.md +748 -0
  9. package/example/App.tsx +95 -0
  10. package/example/README.md +27 -0
  11. package/example/index.js +5 -0
  12. package/example/package.json +22 -0
  13. package/example/src/components/ProviderPicker.tsx +62 -0
  14. package/example/src/context/APIKeysContext.tsx +96 -0
  15. package/example/src/screens/ChatScreen.tsx +205 -0
  16. package/example/src/screens/SettingsScreen.tsx +124 -0
  17. package/example/tsconfig.json +7 -0
  18. package/jest.config.cjs +7 -0
  19. package/jest.setup.ts +28 -0
  20. package/package.json +17 -3
  21. package/src/hooks/__tests__/useAIForm.test.ts +345 -0
  22. package/src/hooks/__tests__/useAIStream.test.ts +427 -0
  23. package/src/hooks/useAIChat.ts +111 -51
  24. package/src/hooks/useAICode.ts +8 -0
  25. package/src/hooks/useAIForm.ts +92 -202
  26. package/src/hooks/useAIStream.ts +114 -58
  27. package/src/hooks/useAISummarize.ts +8 -0
  28. package/src/hooks/useAITranslate.ts +9 -0
  29. package/src/hooks/useAIVoice.ts +8 -0
  30. package/src/hooks/useImageAnalysis.ts +134 -79
  31. package/src/index.ts +25 -1
  32. package/src/types/index.ts +178 -4
  33. package/src/utils/__tests__/fetchWithRetry.test.ts +168 -0
  34. package/src/utils/__tests__/providerFactory.test.ts +493 -0
  35. package/src/utils/fetchWithRetry.ts +100 -0
  36. package/src/utils/index.ts +8 -0
  37. package/src/utils/providerFactory.ts +288 -0
@@ -0,0 +1,288 @@
1
+ /**
2
+ * Unified Provider Factory for handling multiple AI providers
3
+ * Normalizes responses across Anthropic, OpenAI, and Gemini
4
+ */
5
+
6
+ import type {
7
+ AIResponse,
8
+ AIRequestOptions,
9
+ AnthropicResponse,
10
+ OpenAIResponse,
11
+ GeminiResponse,
12
+ ProviderConfig,
13
+ } from '../types';
14
+ import { fetchWithRetry } from './fetchWithRetry';
15
+
16
+ interface ProviderFactoryOptions extends ProviderConfig {
17
+ system?: string;
18
+ context?: Array<{ role: 'user' | 'assistant'; content: string }>;
19
+ }
20
+
21
+ interface ProviderRequest {
22
+ prompt: string;
23
+ options?: AIRequestOptions;
24
+ context?: Array<{ role: 'user' | 'assistant'; content: string }>;
25
+ }
26
+
27
+ export class ProviderFactory {
28
+ private config: ProviderConfig;
29
+
30
+ constructor(config: ProviderConfig) {
31
+ this.config = {
32
+ ...config,
33
+ timeout: config.timeout || 30000,
34
+ maxRetries: config.maxRetries || 3,
35
+ };
36
+ }
37
+
38
+ private getBaseUrl(): string {
39
+ if (this.config.baseUrl) {
40
+ return this.config.baseUrl;
41
+ }
42
+
43
+ switch (this.config.provider) {
44
+ case 'anthropic':
45
+ return 'https://api.anthropic.com';
46
+ case 'openai':
47
+ return 'https://api.openai.com';
48
+ case 'gemini':
49
+ return 'https://generativelanguage.googleapis.com';
50
+ default:
51
+ throw new Error(`Unknown provider: ${this.config.provider}`);
52
+ }
53
+ }
54
+
55
+ async makeRequest(request: ProviderRequest): Promise<AIResponse> {
56
+ switch (this.config.provider) {
57
+ case 'anthropic':
58
+ return this.makeAnthropicRequest(request);
59
+ case 'openai':
60
+ return this.makeOpenAIRequest(request);
61
+ case 'gemini':
62
+ return this.makeGeminiRequest(request);
63
+ default:
64
+ throw new Error(`Unknown provider: ${this.config.provider}`);
65
+ }
66
+ }
67
+
68
+ private async makeAnthropicRequest(request: ProviderRequest): Promise<AIResponse> {
69
+ const baseUrl = this.getBaseUrl();
70
+ const url = `${baseUrl}/v1/messages`;
71
+
72
+ const body = {
73
+ model: this.config.model,
74
+ max_tokens: request.options?.maxTokens || 1024,
75
+ temperature: request.options?.temperature ?? 0.7,
76
+ system: request.options?.system,
77
+ messages: this.buildAnthropicMessages(request),
78
+ };
79
+
80
+ const response = await fetchWithRetry(
81
+ url,
82
+ {
83
+ method: 'POST',
84
+ headers: {
85
+ 'Content-Type': 'application/json',
86
+ 'x-api-key': this.config.apiKey,
87
+ 'anthropic-version': '2023-06-01',
88
+ },
89
+ body: JSON.stringify(body),
90
+ },
91
+ {
92
+ timeout: this.config.timeout,
93
+ maxRetries: this.config.maxRetries,
94
+ },
95
+ );
96
+
97
+ if (!response.ok) {
98
+ let errorData: any;
99
+ try {
100
+ errorData = await response.json();
101
+ } catch {
102
+ errorData = undefined;
103
+ }
104
+ throw new Error(errorData?.error?.message || 'API error');
105
+ }
106
+
107
+ const data = (await response.json()) as AnthropicResponse;
108
+
109
+ return this.normalizeAnthropicResponse(data);
110
+ }
111
+
112
+ private async makeOpenAIRequest(request: ProviderRequest): Promise<AIResponse> {
113
+ const baseUrl = this.getBaseUrl();
114
+ const url = `${baseUrl}/v1/chat/completions`;
115
+
116
+ const body = {
117
+ model: this.config.model,
118
+ max_tokens: request.options?.maxTokens || 1024,
119
+ temperature: request.options?.temperature ?? 0.7,
120
+ messages: this.buildOpenAIMessages(request),
121
+ };
122
+
123
+ const response = await fetchWithRetry(
124
+ url,
125
+ {
126
+ method: 'POST',
127
+ headers: {
128
+ 'Content-Type': 'application/json',
129
+ Authorization: `Bearer ${this.config.apiKey}`,
130
+ },
131
+ body: JSON.stringify(body),
132
+ },
133
+ {
134
+ timeout: this.config.timeout,
135
+ maxRetries: this.config.maxRetries,
136
+ },
137
+ );
138
+
139
+ if (!response.ok) {
140
+ let errorData: any;
141
+ try {
142
+ errorData = await response.json();
143
+ } catch {
144
+ errorData = undefined;
145
+ }
146
+ throw new Error(errorData?.error?.message || 'API error');
147
+ }
148
+
149
+ const data = (await response.json()) as OpenAIResponse;
150
+
151
+ return this.normalizeOpenAIResponse(data);
152
+ }
153
+
154
+ private async makeGeminiRequest(request: ProviderRequest): Promise<AIResponse> {
155
+ const baseUrl = this.getBaseUrl();
156
+ const url = `${baseUrl}/v1beta/models/${this.config.model}:generateContent?key=${this.config.apiKey}`;
157
+
158
+ const body = {
159
+ contents: this.buildGeminiMessages(request),
160
+ generationConfig: {
161
+ maxOutputTokens: request.options?.maxTokens || 1024,
162
+ temperature: request.options?.temperature ?? 0.7,
163
+ },
164
+ };
165
+
166
+ const response = await fetchWithRetry(
167
+ url,
168
+ {
169
+ method: 'POST',
170
+ headers: {
171
+ 'Content-Type': 'application/json',
172
+ },
173
+ body: JSON.stringify(body),
174
+ },
175
+ {
176
+ timeout: this.config.timeout,
177
+ maxRetries: this.config.maxRetries,
178
+ },
179
+ );
180
+
181
+ if (!response.ok) {
182
+ let errorData: any;
183
+ try {
184
+ errorData = await response.json();
185
+ } catch {
186
+ errorData = undefined;
187
+ }
188
+ throw new Error(errorData?.error?.message || 'API error');
189
+ }
190
+
191
+ const data = (await response.json()) as GeminiResponse;
192
+
193
+ return this.normalizeGeminiResponse(data);
194
+ }
195
+
196
+ private buildAnthropicMessages(
197
+ request: ProviderRequest,
198
+ ): Array<{ role: 'user' | 'assistant'; content: string }> {
199
+ const messages = request.context || [];
200
+ return [...messages, { role: 'user', content: request.prompt }];
201
+ }
202
+
203
+ private buildOpenAIMessages(
204
+ request: ProviderRequest,
205
+ ): Array<{ role: 'system' | 'user' | 'assistant'; content: string }> {
206
+ const messages = request.context || [];
207
+ const promptMessage = { role: 'user' as const, content: request.prompt };
208
+
209
+ if (request.options?.system) {
210
+ return [{ role: 'system', content: request.options.system }, ...messages, promptMessage];
211
+ }
212
+
213
+ return [...messages, promptMessage];
214
+ }
215
+
216
+ private buildGeminiMessages(request: ProviderRequest): Array<{ role: string; parts: Array<{ text: string }> }> {
217
+ const messages = request.context || [];
218
+
219
+ return messages
220
+ .map(msg => ({
221
+ role: msg.role === 'user' ? 'user' : 'model',
222
+ parts: [{ text: msg.content }],
223
+ }))
224
+ .concat({
225
+ role: 'user',
226
+ parts: [{ text: request.prompt }],
227
+ });
228
+ }
229
+
230
+ private normalizeAnthropicResponse(data: AnthropicResponse): AIResponse {
231
+ const textContent = data.content?.find(block => block.type === 'text');
232
+ const text = (textContent?.type === 'text' && 'text' in textContent ? (textContent as any).text : '') || '';
233
+
234
+ if (!text) {
235
+ throw new Error('No text content returned by Anthropic API');
236
+ }
237
+
238
+ return {
239
+ text,
240
+ raw: data,
241
+ usage: {
242
+ inputTokens: data.usage?.input_tokens,
243
+ outputTokens: data.usage?.output_tokens,
244
+ totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0),
245
+ },
246
+ };
247
+ }
248
+
249
+ private normalizeOpenAIResponse(data: OpenAIResponse): AIResponse {
250
+ const text = data.choices?.[0]?.message?.content || '';
251
+
252
+ if (!text) {
253
+ throw new Error('No text content returned by OpenAI API');
254
+ }
255
+
256
+ return {
257
+ text,
258
+ raw: data,
259
+ usage: {
260
+ inputTokens: data.usage?.prompt_tokens,
261
+ outputTokens: data.usage?.completion_tokens,
262
+ totalTokens: data.usage?.total_tokens,
263
+ },
264
+ };
265
+ }
266
+
267
+ private normalizeGeminiResponse(data: GeminiResponse): AIResponse {
268
+ const text = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
269
+
270
+ if (!text) {
271
+ throw new Error('No text content returned by Gemini API');
272
+ }
273
+
274
+ return {
275
+ text,
276
+ raw: data,
277
+ usage: {
278
+ inputTokens: data.usageMetadata?.promptTokenCount,
279
+ outputTokens: data.usageMetadata?.candidatesTokenCount,
280
+ totalTokens: data.usageMetadata?.totalTokenCount,
281
+ },
282
+ };
283
+ }
284
+ }
285
+
286
+ export function createProvider(config: ProviderConfig): ProviderFactory {
287
+ return new ProviderFactory(config);
288
+ }