react-native-ai-hooks 0.2.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,301 @@
1
+ /**
2
+ * React Native AI Hooks - Production Architecture
3
+ *
4
+ * This file documents the complete internal architecture of the react-native-ai-hooks
5
+ * library, designed for type-safety, multi-provider support, and optimal performance.
6
+ */
7
+
8
+ /**
9
+ * CORE ARCHITECTURE PRINCIPLES
10
+ * ============================
11
+ *
12
+ * 1. Provider Abstraction Layer
13
+ * - All API calls go through ProviderFactory
14
+ * - Supports Anthropic, OpenAI, Gemini with uniform interface
15
+ * - Easy to extend with new providers
16
+ *
17
+ * 2. Unified Response Normalization
18
+ * - Every provider returns standardized AIResponse object
19
+ * - Includes text content, raw response, and token usage
20
+ * - Enables seamless provider switching
21
+ *
22
+ * 3. Resilience & Retry Logic
23
+ * - fetchWithRetry handles exponential backoff
24
+ * - Automatic rate-limit (429) handling with Retry-After header
25
+ * - Timeout support using AbortController
26
+ * - Configurable max retries and delays
27
+ *
28
+ * 4. Performance Optimization
29
+ * - useMemo for provider config to prevent recreations
30
+ * - useCallback for all callback functions
31
+ * - Proper cleanup for abort controllers and timers
32
+ * - Minimal re-renders through optimized dependencies
33
+ *
34
+ * 5. Error Handling Consistency
35
+ * - All hooks follow same error pattern
36
+ * - Errors caught and stored in hook state
37
+ * - Abort errors handled gracefully (no-op vs throw)
38
+ *
39
+ *
40
+ * PROVIDER FACTORY ARCHITECTURE
41
+ * =============================
42
+ *
43
+ * The ProviderFactory class (src/utils/providerFactory.ts) is the central hub
44
+ * for all API communications. It:
45
+ *
46
+ * - Normalizes request/response formats across providers
47
+ * - Handles authentication (API keys, OAuth for different providers)
48
+ * - Manages baseUrl configuration for proxy/backend integration
49
+ * - Applies consistent rate-limit and timeout handling
50
+ *
51
+ * Usage:
52
+ * const provider = createProvider({
53
+ * provider: 'anthropic',
54
+ * apiKey: 'your-key',
55
+ * model: 'claude-sonnet-4-20250514',
56
+ * baseUrl: 'https://your-proxy.com', // Optional
57
+ * timeout: 30000,
58
+ * maxRetries: 3
59
+ * });
60
+ *
61
+ * const response = await provider.makeRequest({
62
+ * prompt: 'Hello, world!',
63
+ * options: { temperature: 0.7, maxTokens: 1024 },
64
+ * context: [] // Previous messages
65
+ * });
66
+ *
67
+ * Response Structure:
68
+ * {
69
+ * text: string, // The AI response
70
+ * raw: object, // Raw provider response
71
+ * usage: {
72
+ * inputTokens?: number,
73
+ * outputTokens?: number,
74
+ * totalTokens?: number
75
+ * }
76
+ * }
77
+ *
78
+ *
79
+ * FETCH WITH RETRY UTILITY
80
+ * ========================
81
+ *
82
+ * The fetchWithRetry function (src/utils/fetchWithRetry.ts) wraps fetch with:
83
+ *
84
+ * - Exponential backoff: baseDelay * (backoffMultiplier ^ attempt)
85
+ * - Max delay cap: prevents excessive wait times
86
+ * - Rate limit handling: respects Retry-After header (429 status)
87
+ * - Timeout support: AbortController with configurable timeout
88
+ * - Server error retries: automatic retry on 5xx errors
89
+ *
90
+ * Configuration:
91
+ * {
92
+ * maxRetries: 3, // Total attempts
93
+ * baseDelay: 1000, // Initial delay (ms)
94
+ * maxDelay: 10000, // Cap delay (ms)
95
+ * timeout: 30000, // Per-request timeout (ms)
96
+ * backoffMultiplier: 2 // Exponential backoff factor
97
+ * }
98
+ *
99
+ *
100
+ * HOOK ARCHITECTURE
101
+ * =================
102
+ *
103
+ * All hooks follow a consistent pattern:
104
+ *
105
+ * 1. useAIChat - Multi-turn conversations
106
+ * - Manages message history
107
+ * - Auto-includes system prompt and context
108
+ * - Returns messages array + send/abort/clear functions
109
+ *
110
+ * 2. useAIStream - Real-time token streaming
111
+ * - Streams responses token-by-token
112
+ * - Handles both Anthropic and OpenAI stream formats
113
+ * - Supports abort and cleanup
114
+ *
115
+ * 3. useAIForm - Form validation against AI schema
116
+ * - Validates entire form at once
117
+ * - Parses AI response into errors object
118
+ * - Returns FormValidationResult with isValid flag
119
+ *
120
+ * 4. useImageAnalysis - Vision model integration
121
+ * - Accepts URI or base64 image
122
+ * - Supports Anthropic and OpenAI vision models
123
+ * - Auto-converts URIs to base64
124
+ *
125
+ * 5. useAITranslate - Real-time translation
126
+ * - Auto-detects source language
127
+ * - Supports configurable target language
128
+ * - Debounced auto-translate option
129
+ *
130
+ * 6. useAISummarize - Text summarization
131
+ * - Adjustable summary length (short/medium/long)
132
+ * - Maintains text accuracy and fidelity
133
+ *
134
+ * 7. useAICode - Code generation and explanation
135
+ * - Generate code in any language
136
+ * - Explain existing code with focus options
137
+ *
138
+ *
139
+ * TYPE DEFINITIONS
140
+ * ================
141
+ *
142
+ * Core types (src/types/index.ts):
143
+ *
144
+ * - Message: Single message object with role, content, timestamp
145
+ * - AIProviderType: Union of 'anthropic' | 'openai' | 'gemini'
146
+ * - ProviderConfig: Configuration for creating providers
147
+ * - AIResponse: Normalized response structure
148
+ * - AIRequestOptions: Parameters for AI requests
149
+ * - UseAI*Options: Hook configuration interfaces
150
+ * - UseAI*Return: Hook return type interfaces
151
+ * - FormValidationRequest/Result: Form validation types
152
+ * - *Response: Provider-specific response interfaces
153
+ *
154
+ *
155
+ * MULTI-PROVIDER SUPPORT
156
+ * ======================
157
+ *
158
+ * Supported Providers:
159
+ *
160
+ * Provider | Base URL | Auth Header
161
+ * ------------|------------------------------------|-----------------------
162
+ * Anthropic | api.anthropic.com/v1/messages | x-api-key
163
+ * OpenAI | api.openai.com/v1/chat/completions | Authorization: Bearer
164
+ * Gemini | generativelanguage.googleapis.com | Key in URL param
165
+ *
166
+ * To use different provider:
167
+ * const { sendMessage } = useAIChat({
168
+ * apiKey: 'your-key',
169
+ * provider: 'openai', // ← Change provider
170
+ * model: 'gpt-4' // ← Use provider-specific model
171
+ * });
172
+ *
173
+ * Router automatically selects matching endpoint and auth method.
174
+ *
175
+ *
176
+ * SECURITY BEST PRACTICES
177
+ * =======================
178
+ *
179
+ * 1. API Key Management
180
+ * - Store keys in environment variables, never hardcode
181
+ * - Consider passing through backend proxy (baseUrl option)
182
+ *
183
+ * 2. Backend Proxy Pattern
184
+ * - Set baseUrl to your backend endpoint
185
+ * - Backend validates and authenticates requests
186
+ * - Example: https://my-api.com/ai (then /v1/messages appended)
187
+ *
188
+ * 3. Rate Limiting
189
+ * - All providers have rate limits
190
+ * - fetchWithRetry handles 429 responses automatically
191
+ *    - Implement client-side throttling for high-volume apps
192
+ *
193
+ * 4. Timeout Configuration
194
+ * - Default: 30 seconds per request
195
+ * - Adjust based on model complexity and network
196
+ * - Lower timeout for real-time UX requirements
197
+ *
198
+ *
199
+ * PERFORMANCE TUNING
200
+ * ==================
201
+ *
202
+ * 1. Hook Dependencies
203
+ * - Memoized provider configs via useMemo
204
+ * - Wrapped callbacks with useCallback
205
+ * - Deps list carefully curated to prevent recreations
206
+ *
207
+ * 2. Message Management
208
+ * - Store message history in component state
209
+ * - Consider pagination for large conversations
210
+ * - useCallback for sendMessage prevents parent re-renders
211
+ *
212
+ * 3. Streaming Performance
213
+ * - Streaming in useAIStream is incremental
214
+ * - Response state updates are batched by React
215
+ * - Large responses streamed smoothly token-by-token
216
+ *
217
+ * 4. Image Analysis
218
+ * - Image conversion to base64 happens async
219
+ * - Large images may take time to convert
220
+ * - Consider file size limits on client-side
221
+ *
222
+ *
223
+ * EXTENDING THE LIBRARY
224
+ * =====================
225
+ *
226
+ * To add a new AI provider:
227
+ *
228
+ * 1. Add provider type to AIProviderType union
229
+ * 2. Implement makeXyzRequest method in ProviderFactory
230
+ * 3. Implement normalizeXyzResponse method
231
+ * 4. Add default model to DEFAULT_MODEL_MAP in hooks
232
+ * 5. Test with all hook types
233
+ *
234
+ * To add a new hook:
235
+ *
236
+ * 1. Define UseAIXyzOptions interface in types
237
+ * 2. Define UseAIXyzReturn interface in types
238
+ * 3. Create src/hooks/useAIXyz.ts
239
+ * 4. Use ProviderFactory for all API calls
240
+ * 5. Follow same error/loading/cleanup patterns
241
+ * 6. Export from src/index.ts
242
+ *
243
+ *
244
+ * ERROR HANDLING PATTERNS
245
+ * =======================
246
+ *
247
+ * All hooks follow this pattern:
248
+ *
249
+ * try {
250
+ * // API call via ProviderFactory
251
+ * } catch (err) {
252
+ * if (isMountedRef.current) {
253
+ * setError(err.message);
254
+ * }
255
+ * } finally {
256
+ * if (isMountedRef.current) {
257
+ * setIsLoading(false);
258
+ * }
259
+ * }
260
+ *
261
+ * The isMountedRef prevents state updates on unmounted components.
262
+ *
263
+ *
264
+ * STREAMING IMPLEMENTATION
265
+ * ========================
266
+ *
267
+ * Streaming works by parsing `data:`-prefixed server-sent-event (SSE) lines from response.body:
268
+ *
269
+ * Anthropic Format:
270
+ * data: {"type":"content_block_delta","delta":{"type":"text_delta","text":"hello"}}
271
+ *
272
+ * OpenAI Format:
273
+ * data: {"choices":[{"delta":{"content":"hello"}}]}
274
+ *
275
+ * Both formats handled in useAIStream with provider-specific parsing.
276
+ *
277
+ *
278
+ * TESTING STRATEGY
279
+ * ================
280
+ *
281
+ * Unit tests should verify:
282
+ * - Provider factory normalization for each provider
283
+ * - Retry logic with mock fetch
284
+ * - Hook state management (loading, error, data)
285
+ * - Callback cleanup on unmount
286
+ * - JSON parsing in form validation
287
+ *
288
+ * Integration tests should verify:
289
+ * - Multi-turn conversation flow
290
+ * - Image analysis with different mime types
291
+ * - Form validation with complex schemas
292
+ * - Streaming response handling
293
+ *
294
+ * E2E tests should verify:
295
+ * - Real API calls with live keys
296
+ *   - Provider switching with each provider's credentials
297
+ * - Rate limit retry behavior
298
+ * - Error recovery workflows
299
+ */
300
+
301
+ export {};
@@ -1,66 +1,118 @@
1
- import { useState, useCallback } from 'react';
1
+ import { useCallback, useRef, useState, useMemo } from 'react';
2
+ import type { Message, UseAIChatOptions, UseAIChatReturn } from '../types';
3
+ import { createProvider } from '../utils/providerFactory';
2
4
 
3
- interface Message {
4
- role: 'user' | 'assistant';
5
- content: string;
6
- }
7
-
8
- interface UseAIChatOptions {
9
- apiKey: string;
10
- provider?: 'claude' | 'openai';
11
- model?: string;
12
- }
13
-
14
- interface UseAIChatReturn {
15
- messages: Message[];
16
- isLoading: boolean;
17
- error: string | null;
18
- sendMessage: (content: string) => Promise<void>;
19
- clearMessages: () => void;
20
- }
5
+ const DEFAULT_MODEL_MAP = {
6
+ anthropic: 'claude-sonnet-4-20250514',
7
+ openai: 'gpt-4',
8
+ gemini: 'gemini-pro',
9
+ };
21
10
 
22
11
  export function useAIChat(options: UseAIChatOptions): UseAIChatReturn {
23
12
  const [messages, setMessages] = useState<Message[]>([]);
24
13
  const [isLoading, setIsLoading] = useState(false);
25
14
  const [error, setError] = useState<string | null>(null);
26
15
 
27
- const sendMessage = useCallback(async (content: string) => {
28
- setIsLoading(true);
29
- setError(null);
16
+ const abortControllerRef = useRef<AbortController | null>(null);
17
+ const isMountedRef = useRef(true);
30
18
 
31
- const userMessage: Message = { role: 'user', content };
32
- setMessages(prev => [...prev, userMessage]);
19
+ // Memoize provider config to prevent unnecessary recreations
20
+ const providerConfig = useMemo(
21
+ () => ({
22
+ provider: (options.provider || 'anthropic') as 'anthropic' | 'openai' | 'gemini',
23
+ apiKey: options.apiKey,
24
+ model: options.model || DEFAULT_MODEL_MAP[options.provider || 'anthropic'],
25
+ baseUrl: options.baseUrl,
26
+ timeout: options.timeout,
27
+ maxRetries: options.maxRetries,
28
+ }),
29
+ [options],
30
+ );
33
31
 
34
- try {
35
- const response = await fetch('https://api.anthropic.com/v1/messages', {
36
- method: 'POST',
37
- headers: {
38
- 'Content-Type': 'application/json',
39
- 'x-api-key': options.apiKey,
40
- 'anthropic-version': '2023-06-01',
41
- },
42
- body: JSON.stringify({
43
- model: options.model || 'claude-sonnet-4-20250514',
44
- max_tokens: 1024,
45
- messages: [...messages, userMessage],
46
- }),
47
- });
32
+ const provider = useMemo(() => createProvider(providerConfig), [providerConfig]);
48
33
 
49
- const data = await response.json();
50
- const assistantMessage: Message = {
51
- role: 'assistant',
52
- content: data.content[0].text,
53
- };
54
-
55
- setMessages(prev => [...prev, assistantMessage]);
56
- } catch (err) {
57
- setError('Failed to send message');
58
- } finally {
34
+ const abort = useCallback(() => {
35
+ abortControllerRef.current?.abort();
36
+ abortControllerRef.current = null;
37
+ if (isMountedRef.current) {
59
38
  setIsLoading(false);
60
39
  }
61
- }, [messages, options]);
40
+ }, []);
41
+
42
+ const clearMessages = useCallback(() => {
43
+ setMessages([]);
44
+ setError(null);
45
+ }, []);
46
+
47
+ const sendMessage = useCallback(
48
+ async (content: string) => {
49
+ if (!content.trim()) {
50
+ setError('Message cannot be empty');
51
+ return;
52
+ }
53
+
54
+ setError(null);
55
+ const userMessage: Message = {
56
+ role: 'user',
57
+ content: content.trim(),
58
+ timestamp: Date.now(),
59
+ };
60
+
61
+ setMessages((prev: Message[]) => [...prev, userMessage]);
62
+ setIsLoading(true);
63
+
64
+ try {
65
+ const aiResponse = await provider.makeRequest({
66
+ prompt: content,
67
+ options: {
68
+ system: options.system,
69
+ temperature: options.temperature,
70
+ maxTokens: options.maxTokens,
71
+ },
72
+ context: messages.map((msg: Message) => ({
73
+ role: msg.role,
74
+ content: msg.content,
75
+ })),
76
+ });
77
+
78
+ const assistantMessage: Message = {
79
+ role: 'assistant',
80
+ content: aiResponse.text,
81
+ timestamp: Date.now(),
82
+ };
83
+
84
+ if (isMountedRef.current) {
85
+ setMessages((prev: Message[]) => [...prev, assistantMessage]);
86
+ }
87
+ } catch (err) {
88
+ if (isMountedRef.current) {
89
+ const message = err instanceof Error ? err.message : 'Failed to send message';
90
+ setError(message);
91
+ }
92
+ } finally {
93
+ if (isMountedRef.current) {
94
+ setIsLoading(false);
95
+ }
96
+ }
97
+ },
98
+ [provider, messages, options],
99
+ );
62
100
 
63
- const clearMessages = useCallback(() => setMessages([]), []);
101
+ // Cleanup on unmount
102
+ useState(() => {
103
+ isMountedRef.current = true;
104
+ return () => {
105
+ isMountedRef.current = false;
106
+ abortControllerRef.current?.abort();
107
+ };
108
+ }, []);
64
109
 
65
- return { messages, isLoading, error, sendMessage, clearMessages };
110
+ return {
111
+ messages,
112
+ isLoading,
113
+ error,
114
+ sendMessage,
115
+ abort,
116
+ clearMessages,
117
+ };
66
118
  }
@@ -0,0 +1,206 @@
1
+ import { useCallback, useRef, useState } from 'react';
2
+
3
+ interface UseAICodeOptions {
4
+ apiKey: string;
5
+ model?: string;
6
+ system?: string;
7
+ maxTokens?: number;
8
+ temperature?: number;
9
+ defaultLanguage?: string;
10
+ }
11
+
12
+ interface GenerateCodeInput {
13
+ prompt: string;
14
+ language?: string;
15
+ }
16
+
17
+ interface ExplainCodeInput {
18
+ code: string;
19
+ language?: string;
20
+ focus?: string;
21
+ }
22
+
23
+ interface UseAICodeReturn {
24
+ language: string;
25
+ generatedCode: string;
26
+ explanation: string;
27
+ isLoading: boolean;
28
+ error: string | null;
29
+ setLanguage: (language: string) => void;
30
+ generateCode: (input: GenerateCodeInput) => Promise<string | null>;
31
+ explainCode: (input: ExplainCodeInput) => Promise<string | null>;
32
+ clearCodeState: () => void;
33
+ }
34
+
35
+ interface ClaudeTextBlock {
36
+ type?: string;
37
+ text?: string;
38
+ }
39
+
40
+ interface ClaudeApiResult {
41
+ content?: ClaudeTextBlock[];
42
+ error?: {
43
+ message?: string;
44
+ };
45
+ }
46
+
47
+ function getClaudeTextContent(data: unknown): string {
48
+ const content = (data as ClaudeApiResult)?.content;
49
+ if (!Array.isArray(content)) {
50
+ return '';
51
+ }
52
+
53
+ return content
54
+ .filter(item => item?.type === 'text' && typeof item.text === 'string')
55
+ .map(item => item.text as string)
56
+ .join('\n')
57
+ .trim();
58
+ }
59
+
60
+ export function useAICode(options: UseAICodeOptions): UseAICodeReturn {
61
+ const [language, setLanguage] = useState(options.defaultLanguage || 'typescript');
62
+ const [generatedCode, setGeneratedCode] = useState('');
63
+ const [explanation, setExplanation] = useState('');
64
+ const [isLoading, setIsLoading] = useState(false);
65
+ const [error, setError] = useState<string | null>(null);
66
+
67
+ const isMountedRef = useRef(true);
68
+
69
+ const clearCodeState = useCallback(() => {
70
+ setGeneratedCode('');
71
+ setExplanation('');
72
+ setError(null);
73
+ }, []);
74
+
75
+ const sendClaudeRequest = useCallback(
76
+ async (prompt: string) => {
77
+ const apiResponse = await fetch('https://api.anthropic.com/v1/messages', {
78
+ method: 'POST',
79
+ headers: {
80
+ 'Content-Type': 'application/json',
81
+ 'x-api-key': options.apiKey,
82
+ 'anthropic-version': '2023-06-01',
83
+ },
84
+ body: JSON.stringify({
85
+ model: options.model || 'claude-sonnet-4-20250514',
86
+ max_tokens: options.maxTokens ?? 1800,
87
+ temperature: options.temperature ?? 0.2,
88
+ system:
89
+ options.system ||
90
+ 'You are an expert software engineer. Produce practical, correct code and clear explanations.',
91
+ messages: [{ role: 'user', content: prompt }],
92
+ }),
93
+ });
94
+
95
+ const data = (await apiResponse.json()) as ClaudeApiResult;
96
+ if (!apiResponse.ok) {
97
+ throw new Error(data?.error?.message || `Claude API error: ${apiResponse.status}`);
98
+ }
99
+
100
+ const text = getClaudeTextContent(data);
101
+ if (!text) {
102
+ throw new Error('No content returned by Claude API.');
103
+ }
104
+
105
+ return text;
106
+ },
107
+ [options.apiKey, options.maxTokens, options.model, options.system, options.temperature],
108
+ );
109
+
110
+ const generateCode = useCallback(
111
+ async (input: GenerateCodeInput) => {
112
+ const taskPrompt = input.prompt.trim();
113
+ const selectedLanguage = (input.language || language).trim();
114
+
115
+ if (!taskPrompt) {
116
+ setError('No code generation prompt provided.');
117
+ return null;
118
+ }
119
+
120
+ if (!options.apiKey) {
121
+ setError('Missing Claude API key.');
122
+ return null;
123
+ }
124
+
125
+ setIsLoading(true);
126
+ setError(null);
127
+ setLanguage(selectedLanguage);
128
+
129
+ try {
130
+ const prompt = [
131
+ `Generate ${selectedLanguage} code for the following request:`,
132
+ taskPrompt,
133
+ 'Return runnable code and include brief usage notes only when necessary.',
134
+ ].join('\n');
135
+
136
+ const result = await sendClaudeRequest(prompt);
137
+ setGeneratedCode(result);
138
+ return result;
139
+ } catch (err) {
140
+ const message = (err as Error).message || 'Failed to generate code';
141
+ setError(message);
142
+ return null;
143
+ } finally {
144
+ if (isMountedRef.current) {
145
+ setIsLoading(false);
146
+ }
147
+ }
148
+ },
149
+ [language, options.apiKey, sendClaudeRequest],
150
+ );
151
+
152
+ const explainCode = useCallback(
153
+ async (input: ExplainCodeInput) => {
154
+ const code = input.code.trim();
155
+ const selectedLanguage = (input.language || language).trim();
156
+
157
+ if (!code) {
158
+ setError('No code provided for explanation.');
159
+ return null;
160
+ }
161
+
162
+ if (!options.apiKey) {
163
+ setError('Missing Claude API key.');
164
+ return null;
165
+ }
166
+
167
+ setIsLoading(true);
168
+ setError(null);
169
+ setLanguage(selectedLanguage);
170
+
171
+ try {
172
+ const prompt = [
173
+ `Explain the following ${selectedLanguage} code.`,
174
+ input.focus ? `Focus: ${input.focus}` : 'Focus: logic, structure, and potential pitfalls.',
175
+ 'Code:',
176
+ code,
177
+ ].join('\n');
178
+
179
+ const result = await sendClaudeRequest(prompt);
180
+ setExplanation(result);
181
+ return result;
182
+ } catch (err) {
183
+ const message = (err as Error).message || 'Failed to explain code';
184
+ setError(message);
185
+ return null;
186
+ } finally {
187
+ if (isMountedRef.current) {
188
+ setIsLoading(false);
189
+ }
190
+ }
191
+ },
192
+ [language, options.apiKey, sendClaudeRequest],
193
+ );
194
+
195
+ return {
196
+ language,
197
+ generatedCode,
198
+ explanation,
199
+ isLoading,
200
+ error,
201
+ setLanguage,
202
+ generateCode,
203
+ explainCode,
204
+ clearCodeState,
205
+ };
206
+ }