react-native-ai-hooks 0.2.0 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/ARCHITECTURE_GUIDE.md +467 -0
- package/IMPLEMENTATION_COMPLETE.md +349 -0
- package/README.md +10 -0
- package/TECHNICAL_SPECIFICATION.md +748 -0
- package/example/App.tsx +95 -0
- package/example/README.md +27 -0
- package/example/index.js +5 -0
- package/example/package.json +22 -0
- package/example/src/components/ProviderPicker.tsx +62 -0
- package/example/src/context/APIKeysContext.tsx +96 -0
- package/example/src/screens/ChatScreen.tsx +205 -0
- package/example/src/screens/SettingsScreen.tsx +124 -0
- package/example/tsconfig.json +7 -0
- package/package.json +1 -1
- package/src/ARCHITECTURE.md +301 -0
- package/src/hooks/useAIChat.ts +103 -51
- package/src/hooks/useAICode.ts +206 -0
- package/src/hooks/useAIForm.ts +84 -202
- package/src/hooks/useAIStream.ts +104 -57
- package/src/hooks/useAISummarize.ts +158 -0
- package/src/hooks/useAITranslate.ts +207 -0
- package/src/hooks/useImageAnalysis.ts +126 -79
- package/src/index.ts +28 -1
- package/src/types/index.ts +178 -4
- package/src/utils/fetchWithRetry.ts +98 -0
- package/src/utils/index.ts +8 -0
- package/src/utils/providerFactory.ts +265 -0
package/src/types/index.ts
CHANGED
|
@@ -1,15 +1,189 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Core Message Types
|
|
3
|
+
*/
|
|
1
4
|
export interface Message {
|
|
2
5
|
role: 'user' | 'assistant';
|
|
3
6
|
content: string;
|
|
7
|
+
timestamp?: number;
|
|
4
8
|
}
|
|
5
9
|
|
|
6
|
-
|
|
10
|
+
/**
|
|
11
|
+
* AI Provider Types
|
|
12
|
+
*/
|
|
13
|
+
export type AIProviderType = 'anthropic' | 'openai' | 'gemini';
|
|
14
|
+
|
|
15
|
+
export interface ProviderConfig {
|
|
16
|
+
provider: AIProviderType;
|
|
7
17
|
apiKey: string;
|
|
8
|
-
|
|
9
|
-
|
|
18
|
+
model: string;
|
|
19
|
+
baseUrl?: string;
|
|
20
|
+
timeout?: number;
|
|
21
|
+
maxRetries?: number;
|
|
10
22
|
}
|
|
11
23
|
|
|
24
|
+
/**
|
|
25
|
+
* Standardized API Response
|
|
26
|
+
*/
|
|
12
27
|
export interface AIResponse {
|
|
13
|
-
|
|
28
|
+
text: string;
|
|
29
|
+
raw: Record<string, unknown>;
|
|
30
|
+
usage?: {
|
|
31
|
+
inputTokens?: number;
|
|
32
|
+
outputTokens?: number;
|
|
33
|
+
totalTokens?: number;
|
|
34
|
+
};
|
|
14
35
|
error?: string;
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
/**
|
|
39
|
+
* API Request Options
|
|
40
|
+
*/
|
|
41
|
+
export interface AIRequestOptions {
|
|
42
|
+
system?: string;
|
|
43
|
+
temperature?: number;
|
|
44
|
+
maxTokens?: number;
|
|
45
|
+
topP?: number;
|
|
46
|
+
stopSequences?: string[];
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
/**
|
|
50
|
+
* Hook Options Interface
|
|
51
|
+
*/
|
|
52
|
+
export interface UseAIChatOptions {
|
|
53
|
+
apiKey: string;
|
|
54
|
+
provider?: AIProviderType;
|
|
55
|
+
model?: string;
|
|
56
|
+
system?: string;
|
|
57
|
+
temperature?: number;
|
|
58
|
+
maxTokens?: number;
|
|
59
|
+
baseUrl?: string;
|
|
60
|
+
timeout?: number;
|
|
61
|
+
maxRetries?: number;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
export interface UseAIStreamOptions extends UseAIChatOptions {}
|
|
65
|
+
|
|
66
|
+
export interface UseAIFormOptions {
|
|
67
|
+
apiKey: string;
|
|
68
|
+
provider?: AIProviderType;
|
|
69
|
+
model?: string;
|
|
70
|
+
system?: string;
|
|
71
|
+
temperature?: number;
|
|
72
|
+
maxTokens?: number;
|
|
73
|
+
baseUrl?: string;
|
|
74
|
+
timeout?: number;
|
|
75
|
+
maxRetries?: number;
|
|
76
|
+
}
|
|
77
|
+
|
|
78
|
+
export interface UseImageAnalysisOptions {
|
|
79
|
+
apiKey: string;
|
|
80
|
+
provider?: AIProviderType;
|
|
81
|
+
model?: string;
|
|
82
|
+
system?: string;
|
|
83
|
+
maxTokens?: number;
|
|
84
|
+
baseUrl?: string;
|
|
85
|
+
timeout?: number;
|
|
86
|
+
maxRetries?: number;
|
|
87
|
+
uriToBase64?: (uri: string) => Promise<string>;
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
/**
|
|
91
|
+
* Normalized Internal API Structures
|
|
92
|
+
*/
|
|
93
|
+
export interface NormalizedMessage {
|
|
94
|
+
role: 'user' | 'assistant';
|
|
95
|
+
content: NormalizedContent;
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
export type NormalizedContent =
|
|
99
|
+
| { type: 'text'; text: string }
|
|
100
|
+
| { type: 'image'; source: { type: 'base64'; media_type: string; data: string } }
|
|
101
|
+
| Array<NormalizedContent>;
|
|
102
|
+
|
|
103
|
+
/**
|
|
104
|
+
* Provider-Specific Raw Responses
|
|
105
|
+
*/
|
|
106
|
+
export interface AnthropicResponse {
|
|
107
|
+
content?: Array<{ type: string; text?: string }>;
|
|
108
|
+
error?: { type: string; message?: string };
|
|
109
|
+
usage?: {
|
|
110
|
+
input_tokens?: number;
|
|
111
|
+
output_tokens?: number;
|
|
112
|
+
};
|
|
113
|
+
}
|
|
114
|
+
|
|
115
|
+
export interface OpenAIResponse {
|
|
116
|
+
choices?: Array<{ message?: { content?: string } }>;
|
|
117
|
+
error?: { message?: string };
|
|
118
|
+
usage?: {
|
|
119
|
+
prompt_tokens?: number;
|
|
120
|
+
completion_tokens?: number;
|
|
121
|
+
total_tokens?: number;
|
|
122
|
+
};
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
export interface GeminiResponse {
|
|
126
|
+
candidates?: Array<{
|
|
127
|
+
content?: {
|
|
128
|
+
parts?: Array<{ text?: string }>;
|
|
129
|
+
};
|
|
130
|
+
}>;
|
|
131
|
+
error?: { message?: string };
|
|
132
|
+
usageMetadata?: {
|
|
133
|
+
promptTokenCount?: number;
|
|
134
|
+
candidatesTokenCount?: number;
|
|
135
|
+
totalTokenCount?: number;
|
|
136
|
+
};
|
|
137
|
+
}
|
|
138
|
+
|
|
139
|
+
/**
|
|
140
|
+
* Form Validation Input/Output
|
|
141
|
+
*/
|
|
142
|
+
export interface FormValidationRequest {
|
|
143
|
+
formData: Record<string, unknown>;
|
|
144
|
+
validationSchema?: Record<string, string>;
|
|
145
|
+
customInstructions?: string;
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
export interface FormValidationResult {
|
|
149
|
+
isValid: boolean;
|
|
150
|
+
errors: Record<string, string>;
|
|
151
|
+
raw: unknown;
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
/**
|
|
155
|
+
* Generic Hook Return Types
|
|
156
|
+
*/
|
|
157
|
+
export interface UseAIChatReturn {
|
|
158
|
+
messages: Message[];
|
|
159
|
+
isLoading: boolean;
|
|
160
|
+
error: string | null;
|
|
161
|
+
sendMessage: (content: string) => Promise<void>;
|
|
162
|
+
abort: () => void;
|
|
163
|
+
clearMessages: () => void;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
export interface UseAIStreamReturn {
|
|
167
|
+
response: string;
|
|
168
|
+
isLoading: boolean;
|
|
169
|
+
error: string | null;
|
|
170
|
+
streamResponse: (prompt: string) => Promise<void>;
|
|
171
|
+
abort: () => void;
|
|
172
|
+
clearResponse: () => void;
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
export interface UseImageAnalysisReturn {
|
|
176
|
+
description: string;
|
|
177
|
+
isLoading: boolean;
|
|
178
|
+
error: string | null;
|
|
179
|
+
analyzeImage: (uri: string, prompt?: string) => Promise<string | null>;
|
|
180
|
+
clearDescription: () => void;
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
export interface UseAIFormReturn {
|
|
184
|
+
validationResult: FormValidationResult | null;
|
|
185
|
+
isLoading: boolean;
|
|
186
|
+
error: string | null;
|
|
187
|
+
validateForm: (input: FormValidationRequest) => Promise<FormValidationResult | null>;
|
|
188
|
+
clearValidation: () => void;
|
|
15
189
|
}
|
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Fetch with exponential backoff, timeout, and rate-limit handling
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
interface FetchWithRetryOptions {
|
|
6
|
+
maxRetries?: number;
|
|
7
|
+
baseDelay?: number;
|
|
8
|
+
maxDelay?: number;
|
|
9
|
+
timeout?: number;
|
|
10
|
+
backoffMultiplier?: number;
|
|
11
|
+
}
|
|
12
|
+
|
|
13
|
+
interface RetryableError extends Error {
|
|
14
|
+
statusCode?: number;
|
|
15
|
+
isRetryable?: boolean;
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
export async function fetchWithRetry(
|
|
19
|
+
url: string,
|
|
20
|
+
init?: RequestInit,
|
|
21
|
+
options?: FetchWithRetryOptions,
|
|
22
|
+
): Promise<Response> {
|
|
23
|
+
const {
|
|
24
|
+
maxRetries = 3,
|
|
25
|
+
baseDelay = 1000,
|
|
26
|
+
maxDelay = 10000,
|
|
27
|
+
timeout = 30000,
|
|
28
|
+
backoffMultiplier = 2,
|
|
29
|
+
} = options || {};
|
|
30
|
+
|
|
31
|
+
let lastError: RetryableError | undefined;
|
|
32
|
+
|
|
33
|
+
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
34
|
+
try {
|
|
35
|
+
const controller = new AbortController();
|
|
36
|
+
const timeoutId = setTimeout(() => controller.abort(), timeout);
|
|
37
|
+
|
|
38
|
+
try {
|
|
39
|
+
const response = await fetch(url, {
|
|
40
|
+
...init,
|
|
41
|
+
signal: controller.signal,
|
|
42
|
+
});
|
|
43
|
+
|
|
44
|
+
clearTimeout(timeoutId);
|
|
45
|
+
|
|
46
|
+
// Handle rate limiting
|
|
47
|
+
if (response.status === 429) {
|
|
48
|
+
const retryAfter = response.headers.get('Retry-After');
|
|
49
|
+
const delay = retryAfter ? parseInt(retryAfter, 10) * 1000 : Math.min(baseDelay * Math.pow(backoffMultiplier, attempt), maxDelay);
|
|
50
|
+
|
|
51
|
+
if (attempt < maxRetries) {
|
|
52
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
53
|
+
continue;
|
|
54
|
+
}
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
// Handle server errors (5xx) with retry
|
|
58
|
+
if (response.status >= 500 && attempt < maxRetries) {
|
|
59
|
+
const delay = Math.min(baseDelay * Math.pow(backoffMultiplier, attempt), maxDelay);
|
|
60
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
61
|
+
continue;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
return response;
|
|
65
|
+
} finally {
|
|
66
|
+
clearTimeout(timeoutId);
|
|
67
|
+
}
|
|
68
|
+
} catch (err) {
|
|
69
|
+
const error = err as RetryableError;
|
|
70
|
+
|
|
71
|
+
// Handle timeout
|
|
72
|
+
if (error.name === 'AbortError') {
|
|
73
|
+
error.isRetryable = attempt < maxRetries;
|
|
74
|
+
|
|
75
|
+
if (attempt < maxRetries) {
|
|
76
|
+
const delay = Math.min(baseDelay * Math.pow(backoffMultiplier, attempt), maxDelay);
|
|
77
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
78
|
+
continue;
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
lastError = error;
|
|
83
|
+
|
|
84
|
+
// Non-retryable errors should throw immediately
|
|
85
|
+
if (!error.isRetryable && attempt < maxRetries) {
|
|
86
|
+
const delay = Math.min(baseDelay * Math.pow(backoffMultiplier, attempt), maxDelay);
|
|
87
|
+
await new Promise(resolve => setTimeout(resolve, delay));
|
|
88
|
+
continue;
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
if (attempt === maxRetries) {
|
|
92
|
+
throw error;
|
|
93
|
+
}
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
throw lastError || new Error('Fetch failed after retries');
|
|
98
|
+
}
|
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Public utility exports for advanced use cases
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
export { createProvider, ProviderFactory } from './providerFactory';
|
|
6
|
+
export { fetchWithRetry } from './fetchWithRetry';
|
|
7
|
+
|
|
8
|
+
export type { ProviderConfig, AIResponse, AIRequestOptions } from '../types';
|
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Unified Provider Factory for handling multiple AI providers
|
|
3
|
+
* Normalizes responses across Anthropic, OpenAI, and Gemini
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import type {
|
|
7
|
+
AIResponse,
|
|
8
|
+
AIRequestOptions,
|
|
9
|
+
AnthropicResponse,
|
|
10
|
+
OpenAIResponse,
|
|
11
|
+
GeminiResponse,
|
|
12
|
+
ProviderConfig,
|
|
13
|
+
} from '../types';
|
|
14
|
+
import { fetchWithRetry } from './fetchWithRetry';
|
|
15
|
+
|
|
16
|
+
/**
 * ProviderConfig plus per-conversation extras.
 * NOTE(review): not referenced anywhere in this file — presumably consumed by
 * the hook layer; confirm before removing.
 */
interface ProviderFactoryOptions extends ProviderConfig {
  // Optional system prompt applied to every request.
  system?: string;
  // Prior conversation turns to prepend before each new prompt.
  context?: Array<{ role: 'user' | 'assistant'; content: string }>;
}

/** A single normalized request passed to ProviderFactory.makeRequest. */
interface ProviderRequest {
  // The new user message to send.
  prompt: string;
  // Sampling/limit options forwarded to the provider.
  options?: AIRequestOptions;
  // Prior conversation turns, oldest first.
  context?: Array<{ role: 'user' | 'assistant'; content: string }>;
}
|
|
26
|
+
|
|
27
|
+
export class ProviderFactory {
|
|
28
|
+
private config: ProviderConfig;
|
|
29
|
+
|
|
30
|
+
constructor(config: ProviderConfig) {
|
|
31
|
+
this.config = {
|
|
32
|
+
...config,
|
|
33
|
+
timeout: config.timeout || 30000,
|
|
34
|
+
maxRetries: config.maxRetries || 3,
|
|
35
|
+
};
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
private getBaseUrl(): string {
|
|
39
|
+
if (this.config.baseUrl) {
|
|
40
|
+
return this.config.baseUrl;
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
switch (this.config.provider) {
|
|
44
|
+
case 'anthropic':
|
|
45
|
+
return 'https://api.anthropic.com';
|
|
46
|
+
case 'openai':
|
|
47
|
+
return 'https://api.openai.com';
|
|
48
|
+
case 'gemini':
|
|
49
|
+
return 'https://generativelanguage.googleapis.com';
|
|
50
|
+
default:
|
|
51
|
+
throw new Error(`Unknown provider: ${this.config.provider}`);
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
async makeRequest(request: ProviderRequest): Promise<AIResponse> {
|
|
56
|
+
switch (this.config.provider) {
|
|
57
|
+
case 'anthropic':
|
|
58
|
+
return this.makeAnthropicRequest(request);
|
|
59
|
+
case 'openai':
|
|
60
|
+
return this.makeOpenAIRequest(request);
|
|
61
|
+
case 'gemini':
|
|
62
|
+
return this.makeGeminiRequest(request);
|
|
63
|
+
default:
|
|
64
|
+
throw new Error(`Unknown provider: ${this.config.provider}`);
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
private async makeAnthropicRequest(request: ProviderRequest): Promise<AIResponse> {
|
|
69
|
+
const baseUrl = this.getBaseUrl();
|
|
70
|
+
const url = `${baseUrl}/v1/messages`;
|
|
71
|
+
|
|
72
|
+
const body = {
|
|
73
|
+
model: this.config.model,
|
|
74
|
+
max_tokens: request.options?.maxTokens || 1024,
|
|
75
|
+
temperature: request.options?.temperature ?? 0.7,
|
|
76
|
+
system: request.options?.system,
|
|
77
|
+
messages: this.buildAnthropicMessages(request),
|
|
78
|
+
};
|
|
79
|
+
|
|
80
|
+
const response = await fetchWithRetry(
|
|
81
|
+
url,
|
|
82
|
+
{
|
|
83
|
+
method: 'POST',
|
|
84
|
+
headers: {
|
|
85
|
+
'Content-Type': 'application/json',
|
|
86
|
+
'x-api-key': this.config.apiKey,
|
|
87
|
+
'anthropic-version': '2023-06-01',
|
|
88
|
+
},
|
|
89
|
+
body: JSON.stringify(body),
|
|
90
|
+
},
|
|
91
|
+
{
|
|
92
|
+
timeout: this.config.timeout,
|
|
93
|
+
maxRetries: this.config.maxRetries,
|
|
94
|
+
},
|
|
95
|
+
);
|
|
96
|
+
|
|
97
|
+
const data = (await response.json()) as AnthropicResponse;
|
|
98
|
+
|
|
99
|
+
if (!response.ok) {
|
|
100
|
+
throw new Error(data?.error?.message || `Anthropic API error: ${response.status}`);
|
|
101
|
+
}
|
|
102
|
+
|
|
103
|
+
return this.normalizeAnthropicResponse(data);
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
private async makeOpenAIRequest(request: ProviderRequest): Promise<AIResponse> {
|
|
107
|
+
const baseUrl = this.getBaseUrl();
|
|
108
|
+
const url = `${baseUrl}/v1/chat/completions`;
|
|
109
|
+
|
|
110
|
+
const body = {
|
|
111
|
+
model: this.config.model,
|
|
112
|
+
max_tokens: request.options?.maxTokens || 1024,
|
|
113
|
+
temperature: request.options?.temperature ?? 0.7,
|
|
114
|
+
system: request.options?.system,
|
|
115
|
+
messages: this.buildOpenAIMessages(request),
|
|
116
|
+
};
|
|
117
|
+
|
|
118
|
+
const response = await fetchWithRetry(
|
|
119
|
+
url,
|
|
120
|
+
{
|
|
121
|
+
method: 'POST',
|
|
122
|
+
headers: {
|
|
123
|
+
'Content-Type': 'application/json',
|
|
124
|
+
Authorization: `Bearer ${this.config.apiKey}`,
|
|
125
|
+
},
|
|
126
|
+
body: JSON.stringify(body),
|
|
127
|
+
},
|
|
128
|
+
{
|
|
129
|
+
timeout: this.config.timeout,
|
|
130
|
+
maxRetries: this.config.maxRetries,
|
|
131
|
+
},
|
|
132
|
+
);
|
|
133
|
+
|
|
134
|
+
const data = (await response.json()) as OpenAIResponse;
|
|
135
|
+
|
|
136
|
+
if (!response.ok) {
|
|
137
|
+
throw new Error(data?.error?.message || `OpenAI API error: ${response.status}`);
|
|
138
|
+
}
|
|
139
|
+
|
|
140
|
+
return this.normalizeOpenAIResponse(data);
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
private async makeGeminiRequest(request: ProviderRequest): Promise<AIResponse> {
|
|
144
|
+
const baseUrl = this.getBaseUrl();
|
|
145
|
+
const url = `${baseUrl}/v1beta/models/${this.config.model}:generateContent?key=${this.config.apiKey}`;
|
|
146
|
+
|
|
147
|
+
const body = {
|
|
148
|
+
contents: this.buildGeminiMessages(request),
|
|
149
|
+
generationConfig: {
|
|
150
|
+
maxOutputTokens: request.options?.maxTokens || 1024,
|
|
151
|
+
temperature: request.options?.temperature ?? 0.7,
|
|
152
|
+
},
|
|
153
|
+
};
|
|
154
|
+
|
|
155
|
+
const response = await fetchWithRetry(
|
|
156
|
+
url,
|
|
157
|
+
{
|
|
158
|
+
method: 'POST',
|
|
159
|
+
headers: {
|
|
160
|
+
'Content-Type': 'application/json',
|
|
161
|
+
},
|
|
162
|
+
body: JSON.stringify(body),
|
|
163
|
+
},
|
|
164
|
+
{
|
|
165
|
+
timeout: this.config.timeout,
|
|
166
|
+
maxRetries: this.config.maxRetries,
|
|
167
|
+
},
|
|
168
|
+
);
|
|
169
|
+
|
|
170
|
+
const data = (await response.json()) as GeminiResponse;
|
|
171
|
+
|
|
172
|
+
if (!response.ok) {
|
|
173
|
+
throw new Error(data?.error?.message || `Gemini API error: ${response.status}`);
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
return this.normalizeGeminiResponse(data);
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
private buildAnthropicMessages(
|
|
180
|
+
request: ProviderRequest,
|
|
181
|
+
): Array<{ role: 'user' | 'assistant'; content: string }> {
|
|
182
|
+
const messages = request.context || [];
|
|
183
|
+
return [...messages, { role: 'user', content: request.prompt }];
|
|
184
|
+
}
|
|
185
|
+
|
|
186
|
+
private buildOpenAIMessages(
|
|
187
|
+
request: ProviderRequest,
|
|
188
|
+
): Array<{ role: 'user' | 'assistant'; content: string }> {
|
|
189
|
+
const messages = request.context || [];
|
|
190
|
+
return [...messages, { role: 'user', content: request.prompt }];
|
|
191
|
+
}
|
|
192
|
+
|
|
193
|
+
private buildGeminiMessages(request: ProviderRequest): Array<{ role: string; parts: Array<{ text: string }> }> {
|
|
194
|
+
const messages = request.context || [];
|
|
195
|
+
|
|
196
|
+
return messages
|
|
197
|
+
.map(msg => ({
|
|
198
|
+
role: msg.role === 'user' ? 'user' : 'model',
|
|
199
|
+
parts: [{ text: msg.content }],
|
|
200
|
+
}))
|
|
201
|
+
.concat({
|
|
202
|
+
role: 'user',
|
|
203
|
+
parts: [{ text: request.prompt }],
|
|
204
|
+
});
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
private normalizeAnthropicResponse(data: AnthropicResponse): AIResponse {
|
|
208
|
+
const textContent = data.content?.find(block => block.type === 'text');
|
|
209
|
+
const text = (textContent?.type === 'text' && 'text' in textContent ? (textContent as any).text : '') || '';
|
|
210
|
+
|
|
211
|
+
if (!text) {
|
|
212
|
+
throw new Error('No text content returned by Anthropic API');
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
return {
|
|
216
|
+
text,
|
|
217
|
+
raw: data,
|
|
218
|
+
usage: {
|
|
219
|
+
inputTokens: data.usage?.input_tokens,
|
|
220
|
+
outputTokens: data.usage?.output_tokens,
|
|
221
|
+
totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0),
|
|
222
|
+
},
|
|
223
|
+
};
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
private normalizeOpenAIResponse(data: OpenAIResponse): AIResponse {
|
|
227
|
+
const text = data.choices?.[0]?.message?.content || '';
|
|
228
|
+
|
|
229
|
+
if (!text) {
|
|
230
|
+
throw new Error('No text content returned by OpenAI API');
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
return {
|
|
234
|
+
text,
|
|
235
|
+
raw: data,
|
|
236
|
+
usage: {
|
|
237
|
+
inputTokens: data.usage?.prompt_tokens,
|
|
238
|
+
outputTokens: data.usage?.completion_tokens,
|
|
239
|
+
totalTokens: data.usage?.total_tokens,
|
|
240
|
+
},
|
|
241
|
+
};
|
|
242
|
+
}
|
|
243
|
+
|
|
244
|
+
private normalizeGeminiResponse(data: GeminiResponse): AIResponse {
|
|
245
|
+
const text = data.candidates?.[0]?.content?.parts?.[0]?.text || '';
|
|
246
|
+
|
|
247
|
+
if (!text) {
|
|
248
|
+
throw new Error('No text content returned by Gemini API');
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
return {
|
|
252
|
+
text,
|
|
253
|
+
raw: data,
|
|
254
|
+
usage: {
|
|
255
|
+
inputTokens: data.usageMetadata?.promptTokenCount,
|
|
256
|
+
outputTokens: data.usageMetadata?.candidatesTokenCount,
|
|
257
|
+
totalTokens: data.usageMetadata?.totalTokenCount,
|
|
258
|
+
},
|
|
259
|
+
};
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
|
|
263
|
+
/**
 * Convenience factory: builds a ProviderFactory for the given configuration.
 * Equivalent to `new ProviderFactory(config)`.
 */
export function createProvider(config: ProviderConfig): ProviderFactory {
  return new ProviderFactory(config);
}
|