claude-flow 2.0.0-alpha.65 → 2.0.0-alpha.67
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/cache/agent-pool.json +33 -0
- package/.claude/cache/memory-optimization.json +19 -0
- package/.claude/cache/neural-optimization.json +25 -0
- package/.claude/cache/optimized-hooks.json +19 -0
- package/.claude/cache/parallel-processing.json +26 -0
- package/.claude/optimized-settings.json +270 -0
- package/.claude/settings-enhanced.json +278 -0
- package/.claude/settings.json +105 -8
- package/CHANGELOG.md +40 -0
- package/bin/claude-flow +1 -1
- package/dist/cli/simple-commands/hive-mind.js +1 -1
- package/dist/cli/simple-commands/hive-mind.js.map +1 -1
- package/dist/cli/simple-commands/hooks.js +6 -4
- package/dist/cli/simple-commands/hooks.js.map +1 -1
- package/dist/providers/anthropic-provider.d.ts +27 -0
- package/dist/providers/anthropic-provider.d.ts.map +1 -0
- package/dist/providers/anthropic-provider.js +247 -0
- package/dist/providers/anthropic-provider.js.map +1 -0
- package/dist/providers/base-provider.d.ts +134 -0
- package/dist/providers/base-provider.d.ts.map +1 -0
- package/dist/providers/base-provider.js +407 -0
- package/dist/providers/base-provider.js.map +1 -0
- package/dist/providers/cohere-provider.d.ts +28 -0
- package/dist/providers/cohere-provider.d.ts.map +1 -0
- package/dist/providers/cohere-provider.js +407 -0
- package/dist/providers/cohere-provider.js.map +1 -0
- package/dist/providers/google-provider.d.ts +23 -0
- package/dist/providers/google-provider.d.ts.map +1 -0
- package/dist/providers/google-provider.js +362 -0
- package/dist/providers/google-provider.js.map +1 -0
- package/dist/providers/index.d.ts +14 -0
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/index.js +18 -0
- package/dist/providers/index.js.map +1 -0
- package/dist/providers/ollama-provider.d.ts +23 -0
- package/dist/providers/ollama-provider.d.ts.map +1 -0
- package/dist/providers/ollama-provider.js +374 -0
- package/dist/providers/ollama-provider.js.map +1 -0
- package/dist/providers/openai-provider.d.ts +23 -0
- package/dist/providers/openai-provider.d.ts.map +1 -0
- package/dist/providers/openai-provider.js +349 -0
- package/dist/providers/openai-provider.js.map +1 -0
- package/dist/providers/provider-manager.d.ts +139 -0
- package/dist/providers/provider-manager.d.ts.map +1 -0
- package/dist/providers/provider-manager.js +513 -0
- package/dist/providers/provider-manager.js.map +1 -0
- package/dist/providers/types.d.ts +356 -0
- package/dist/providers/types.d.ts.map +1 -0
- package/dist/providers/types.js +61 -0
- package/dist/providers/types.js.map +1 -0
- package/dist/providers/utils.d.ts +37 -0
- package/dist/providers/utils.d.ts.map +1 -0
- package/dist/providers/utils.js +322 -0
- package/dist/providers/utils.js.map +1 -0
- package/dist/services/agentic-flow-hooks/hook-manager.d.ts +70 -0
- package/dist/services/agentic-flow-hooks/hook-manager.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/hook-manager.js +512 -0
- package/dist/services/agentic-flow-hooks/hook-manager.js.map +1 -0
- package/dist/services/agentic-flow-hooks/index.d.ts +36 -0
- package/dist/services/agentic-flow-hooks/index.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/index.js +325 -0
- package/dist/services/agentic-flow-hooks/index.js.map +1 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.d.ts +33 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.js +415 -0
- package/dist/services/agentic-flow-hooks/llm-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.d.ts +45 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.js +532 -0
- package/dist/services/agentic-flow-hooks/memory-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.d.ts +39 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.js +561 -0
- package/dist/services/agentic-flow-hooks/neural-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.d.ts +33 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.js +621 -0
- package/dist/services/agentic-flow-hooks/performance-hooks.js.map +1 -0
- package/dist/services/agentic-flow-hooks/types.d.ts +379 -0
- package/dist/services/agentic-flow-hooks/types.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/types.js +8 -0
- package/dist/services/agentic-flow-hooks/types.js.map +1 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts +39 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.d.ts.map +1 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.js +742 -0
- package/dist/services/agentic-flow-hooks/workflow-hooks.js.map +1 -0
- package/package.json +2 -2
- package/scripts/install-arm64.js +78 -0
- package/scripts/optimize-performance.js +400 -0
- package/scripts/performance-monitor.js +263 -0
- package/src/cli/help-text.js +1 -1
- package/src/cli/simple-cli.js +1 -1
- package/src/cli/simple-commands/hive-mind.js +1 -1
- package/src/cli/simple-commands/hooks.js +8 -6
- package/src/providers/anthropic-provider.ts +282 -0
- package/src/providers/base-provider.ts +560 -0
- package/src/providers/cohere-provider.ts +521 -0
- package/src/providers/google-provider.ts +477 -0
- package/src/providers/index.ts +21 -0
- package/src/providers/ollama-provider.ts +489 -0
- package/src/providers/openai-provider.ts +476 -0
- package/src/providers/provider-manager.ts +654 -0
- package/src/providers/types.ts +531 -0
- package/src/providers/utils.ts +376 -0
- package/src/services/agentic-flow-hooks/hook-manager.ts +701 -0
- package/src/services/agentic-flow-hooks/index.ts +386 -0
- package/src/services/agentic-flow-hooks/llm-hooks.ts +557 -0
- package/src/services/agentic-flow-hooks/memory-hooks.ts +710 -0
- package/src/services/agentic-flow-hooks/neural-hooks.ts +758 -0
- package/src/services/agentic-flow-hooks/performance-hooks.ts +827 -0
- package/src/services/agentic-flow-hooks/types.ts +503 -0
- package/src/services/agentic-flow-hooks/workflow-hooks.ts +1026 -0
|
@@ -0,0 +1,477 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Google AI Provider Implementation
|
|
3
|
+
* Supports Gemini Pro, PaLM, and other Google models
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
import { BaseProvider } from './base-provider.js';
|
|
7
|
+
import {
|
|
8
|
+
LLMProvider,
|
|
9
|
+
LLMModel,
|
|
10
|
+
LLMRequest,
|
|
11
|
+
LLMResponse,
|
|
12
|
+
LLMStreamEvent,
|
|
13
|
+
ModelInfo,
|
|
14
|
+
ProviderCapabilities,
|
|
15
|
+
HealthCheckResult,
|
|
16
|
+
LLMProviderError,
|
|
17
|
+
RateLimitError,
|
|
18
|
+
AuthenticationError,
|
|
19
|
+
} from './types.js';
|
|
20
|
+
|
|
21
|
+
/**
 * Request body shape for the Google Generative Language API
 * (`models/{model}:generateContent`). Built by
 * `GoogleProvider.buildGoogleRequest` from a provider-agnostic `LLMRequest`.
 */
interface GoogleAIRequest {
  // Conversation turns. Google uses 'model' where other providers use
  // 'assistant'; there is no 'system' role — system messages are folded
  // into the first user turn by the caller.
  contents: Array<{
    role: 'user' | 'model';
    parts: Array<{
      // Plain-text part of a turn.
      text?: string;
      // Base64-encoded media payload (e.g. images for gemini-pro-vision).
      inlineData?: {
        mimeType: string;
        data: string;
      };
    }>;
  }>;
  // Sampling / decoding parameters; all optional, API defaults apply when omitted.
  generationConfig?: {
    temperature?: number;
    topK?: number;
    topP?: number;
    maxOutputTokens?: number;
    stopSequences?: string[];
  };
  // Per-category content-safety thresholds (e.g. HARM_CATEGORY_HARASSMENT / BLOCK_NONE).
  safetySettings?: Array<{
    category: string;
    threshold: string;
  }>;
}
|
|
44
|
+
|
|
45
|
+
/**
 * Response body shape returned by `generateContent` /
 * `streamGenerateContent`. Streaming chunks reuse this same shape with
 * partial `candidates`.
 *
 * NOTE(review): `content.parts` is typed as required here, but the live API
 * can omit `content` entirely for safety-blocked candidates — confirm
 * against the API before relying on it being present.
 */
interface GoogleAIResponse {
  candidates: Array<{
    // Generated turn; role is 'model' for completions.
    content: {
      parts: Array<{
        text: string;
      }>;
      role: string;
    };
    // e.g. 'STOP', 'MAX_TOKENS', 'SAFETY', 'RECITATION' — mapped by
    // GoogleProvider.mapFinishReason.
    finishReason: string;
    index: number;
    safetyRatings: Array<{
      category: string;
      probability: string;
    }>;
  }>;
  // Safety assessment of the prompt itself (present when the prompt was flagged).
  promptFeedback?: {
    safetyRatings: Array<{
      category: string;
      probability: string;
    }>;
  };
  // Token accounting; optional — callers fall back to local estimation when absent.
  usageMetadata?: {
    promptTokenCount: number;
    candidatesTokenCount: number;
    totalTokenCount: number;
  };
}
|
|
72
|
+
|
|
73
|
+
/**
 * LLM provider backed by Google's Generative Language API.
 *
 * Supports Gemini (v1beta endpoint) and the older PaLM/Bison models
 * (v1beta2 endpoint). Inherits retry/health-check/lifecycle plumbing from
 * `BaseProvider`; this class implements only the Google-specific request
 * building, transport, and response mapping.
 *
 * NOTE(review): the API key is sent as a `?key=` URL query parameter in
 * every request. Query strings routinely end up in proxy/server logs —
 * consider the `x-goog-api-key` header instead; verify against current
 * Google API guidance.
 */
export class GoogleProvider extends BaseProvider {
  readonly name: LLMProvider = 'google';
  // Static capability/pricing table; `listModels` and `getModelInfo` read from it.
  readonly capabilities: ProviderCapabilities = {
    supportedModels: [
      'gemini-pro',
      'gemini-pro-vision',
      'palm-2',
      'bison',
    ],
    // Input context window per model, in tokens.
    maxContextLength: {
      'gemini-pro': 32768,
      'gemini-pro-vision': 16384,
      'palm-2': 8192,
      'bison': 4096,
    } as Record<LLMModel, number>,
    maxOutputTokens: {
      'gemini-pro': 2048,
      'gemini-pro-vision': 2048,
      'palm-2': 1024,
      'bison': 1024,
    } as Record<LLMModel, number>,
    supportsStreaming: true,
    supportsFunctionCalling: true,
    supportsSystemMessages: false, // Google AI doesn't have explicit system messages
    supportsVision: true, // Gemini Pro Vision
    supportsAudio: false,
    supportsTools: true,
    supportsFineTuning: false,
    supportsEmbeddings: true,
    supportsLogprobs: false,
    supportsBatching: true,
    rateLimit: {
      requestsPerMinute: 60,
      tokensPerMinute: 60000,
      concurrentRequests: 10,
    },
    // USD per 1k tokens; used by doComplete/doStreamComplete cost accounting.
    // NOTE(review): hard-coded snapshot — will drift from Google's published pricing.
    pricing: {
      'gemini-pro': {
        promptCostPer1k: 0.00025,
        completionCostPer1k: 0.0005,
        currency: 'USD',
      },
      'gemini-pro-vision': {
        promptCostPer1k: 0.00025,
        completionCostPer1k: 0.0005,
        currency: 'USD',
      },
      'palm-2': {
        promptCostPer1k: 0.0005,
        completionCostPer1k: 0.001,
        currency: 'USD',
      },
      'bison': {
        promptCostPer1k: 0.0005,
        completionCostPer1k: 0.001,
        currency: 'USD',
      },
    },
  };

  // Assigned in doInitialize(); relies on doInitialize running before any
  // request method (enforced by BaseProvider's lifecycle — TODO confirm).
  private baseUrl: string;

  /**
   * Validates the API key and selects the endpoint generation for the
   * configured model: Gemini models use v1beta, PaLM/Bison use v1beta2.
   * @throws AuthenticationError when no API key is configured.
   */
  protected async doInitialize(): Promise<void> {
    if (!this.config.apiKey) {
      throw new AuthenticationError('Google AI API key is required', 'google');
    }

    // Use Gemini API for newer models, PaLM API for older ones
    const model = this.config.model;
    if (model.startsWith('gemini')) {
      this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta';
    } else {
      this.baseUrl = 'https://generativelanguage.googleapis.com/v1beta2';
    }
  }

  /**
   * Non-streaming completion via `:generateContent`.
   *
   * Aborts via AbortController after `config.timeout` (default 60s).
   * Falls back to local token estimation when the API omits
   * `usageMetadata`, then computes cost from the static pricing table.
   *
   * @throws LLMProviderError ('NO_RESPONSE') when no candidates come back.
   * @throws whatever handleErrorResponse maps non-2xx statuses to,
   *         re-wrapped by BaseProvider.transformError.
   */
  protected async doComplete(request: LLMRequest): Promise<LLMResponse> {
    const googleRequest = this.buildGoogleRequest(request);
    const model = this.mapToGoogleModel(request.model || this.config.model);

    const url = `${this.baseUrl}/models/${model}:generateContent?key=${this.config.apiKey}`;

    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), this.config.timeout || 60000);

    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(googleRequest),
        signal: controller.signal,
      });

      clearTimeout(timeout);

      if (!response.ok) {
        await this.handleErrorResponse(response);
      }

      const data: GoogleAIResponse = await response.json();

      if (!data.candidates || data.candidates.length === 0) {
        throw new LLMProviderError(
          'No response generated',
          'NO_RESPONSE',
          'google',
          undefined,
          false
        );
      }

      const candidate = data.candidates[0];
      // NOTE(review): assumes candidate.content.parts is always present;
      // safety-blocked candidates may omit content — confirm against the API.
      const content = candidate.content.parts.map(part => part.text).join('');

      // Calculate cost
      // When usageMetadata is absent, estimate tokens locally.
      // NOTE(review): the prompt estimate runs over JSON.stringify of the
      // messages, so JSON punctuation/keys inflate the count slightly.
      const usageData = data.usageMetadata || {
        promptTokenCount: this.estimateTokens(JSON.stringify(request.messages)),
        candidatesTokenCount: this.estimateTokens(content),
        totalTokenCount: 0,
      };
      usageData.totalTokenCount = usageData.promptTokenCount + usageData.candidatesTokenCount;

      // Non-null assertion: pricing is statically populated above. Throws a
      // TypeError below if the request names a model outside the table.
      const pricing = this.capabilities.pricing![request.model || this.config.model];
      const promptCost = (usageData.promptTokenCount / 1000) * pricing.promptCostPer1k;
      const completionCost = (usageData.candidatesTokenCount / 1000) * pricing.completionCostPer1k;

      return {
        // Google's response carries no request id, so synthesize one.
        id: `google-${Date.now()}`,
        model: request.model || this.config.model,
        provider: 'google',
        content,
        usage: {
          promptTokens: usageData.promptTokenCount,
          completionTokens: usageData.candidatesTokenCount,
          totalTokens: usageData.totalTokenCount,
        },
        cost: {
          promptCost,
          completionCost,
          totalCost: promptCost + completionCost,
          currency: 'USD',
        },
        finishReason: this.mapFinishReason(candidate.finishReason),
      };
    } catch (error) {
      clearTimeout(timeout);
      throw this.transformError(error);
    }
  }

  /**
   * Streaming completion via `:streamGenerateContent`.
   *
   * Yields a 'content' event per parsed chunk and a final 'done' event
   * carrying aggregated usage and cost. The abort timeout is doubled
   * relative to doComplete since streams run longer.
   *
   * NOTE(review): chunks are parsed as newline-delimited JSON; the Gemini
   * streaming endpoint returns a JSON array unless `alt=sse` is requested —
   * unparseable lines are only logged at warn level, which could silently
   * drop content. Verify the wire format.
   */
  protected async *doStreamComplete(request: LLMRequest): AsyncIterable<LLMStreamEvent> {
    const googleRequest = this.buildGoogleRequest(request);
    const model = this.mapToGoogleModel(request.model || this.config.model);

    const url = `${this.baseUrl}/models/${model}:streamGenerateContent?key=${this.config.apiKey}`;

    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), (this.config.timeout || 60000) * 2);

    try {
      const response = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
        },
        body: JSON.stringify(googleRequest),
        signal: controller.signal,
      });

      if (!response.ok) {
        await this.handleErrorResponse(response);
      }

      const reader = response.body!.getReader();
      const decoder = new TextDecoder();
      let buffer = '';
      let totalContent = '';
      let promptTokens = 0;
      let completionTokens = 0;

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        // Accumulate bytes and split on newlines; the trailing partial line
        // stays in the buffer for the next read.
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.trim() === '') continue;

          try {
            const data: GoogleAIResponse = JSON.parse(line);

            if (data.candidates && data.candidates.length > 0) {
              const candidate = data.candidates[0];
              // NOTE(review): assumes content.parts present on every chunk —
              // see doComplete.
              const content = candidate.content.parts.map(part => part.text).join('');

              if (content) {
                totalContent += content;
                yield {
                  type: 'content',
                  delta: { content },
                };
              }

              // Later chunks overwrite earlier counts; the final chunk's
              // usageMetadata wins.
              if (data.usageMetadata) {
                promptTokens = data.usageMetadata.promptTokenCount;
                completionTokens = data.usageMetadata.candidatesTokenCount;
              }
            }
          } catch (e) {
            this.logger.warn('Failed to parse Google AI stream chunk', { line, error: e });
          }
        }
      }

      // Final event with usage and cost
      const pricing = this.capabilities.pricing![request.model || this.config.model];
      const promptCost = (promptTokens / 1000) * pricing.promptCostPer1k;
      const completionCost = (completionTokens / 1000) * pricing.completionCostPer1k;

      yield {
        type: 'done',
        usage: {
          promptTokens,
          completionTokens,
          totalTokens: promptTokens + completionTokens,
        },
        cost: {
          promptCost,
          completionCost,
          totalCost: promptCost + completionCost,
          currency: 'USD',
        },
      };
    } catch (error) {
      clearTimeout(timeout);
      throw this.transformError(error);
    } finally {
      // Redundant with the catch-path clearTimeout but harmless; guarantees
      // the timer is cleared on the success path too.
      clearTimeout(timeout);
    }
  }

  /** Returns the static supported-model list (no API call is made). */
  async listModels(): Promise<LLMModel[]> {
    return this.capabilities.supportedModels;
  }

  /**
   * Builds model metadata from the static capability table.
   * Falls back to 4096/2048 context/output limits for unknown models;
   * note `pricing![model]` is undefined for models outside the table.
   */
  async getModelInfo(model: LLMModel): Promise<ModelInfo> {
    return {
      model,
      name: model,
      description: this.getModelDescription(model),
      contextLength: this.capabilities.maxContextLength[model] || 4096,
      maxOutputTokens: this.capabilities.maxOutputTokens[model] || 2048,
      supportedFeatures: [
        'chat',
        'completion',
        ...(model.includes('vision') ? ['vision'] : []),
        ...(model.startsWith('gemini') ? ['function_calling'] : []),
      ],
      pricing: this.capabilities.pricing![model],
    };
  }

  /**
   * Liveness probe: GETs the models listing endpoint with the configured
   * key. Never throws — failures are reported in the result object.
   */
  protected async doHealthCheck(): Promise<HealthCheckResult> {
    try {
      const url = `${this.baseUrl}/models?key=${this.config.apiKey}`;
      const response = await fetch(url);

      if (!response.ok) {
        throw new Error(`Health check failed: ${response.status}`);
      }

      return {
        healthy: true,
        timestamp: new Date(),
      };
    } catch (error) {
      return {
        healthy: false,
        error: error instanceof Error ? error.message : 'Unknown error',
        timestamp: new Date(),
      };
    }
  }

  /**
   * Translates an LLMRequest into Google's wire format.
   *
   * Since Google has no system role, a leading system message is rewritten
   * as a user turn prefixed with "Instructions: "; system messages after
   * the first turn are dropped entirely. Request-level parameters override
   * config-level ones (?? keeps explicit 0 values). All four safety
   * categories are set to BLOCK_NONE.
   */
  private buildGoogleRequest(request: LLMRequest): GoogleAIRequest {
    // Convert messages to Google format
    const contents: GoogleAIRequest['contents'] = [];

    for (const message of request.messages) {
      // Skip system messages or prepend to first user message
      if (message.role === 'system') {
        if (contents.length === 0) {
          contents.push({
            role: 'user',
            parts: [{ text: `Instructions: ${message.content}` }],
          });
        }
        continue;
      }

      contents.push({
        role: message.role === 'assistant' ? 'model' : 'user',
        parts: [{ text: message.content }],
      });
    }

    return {
      contents,
      generationConfig: {
        temperature: request.temperature ?? this.config.temperature,
        topK: request.topK ?? this.config.topK,
        topP: request.topP ?? this.config.topP,
        maxOutputTokens: request.maxTokens ?? this.config.maxTokens,
        stopSequences: request.stopSequences ?? this.config.stopSequences,
      },
      safetySettings: [
        {
          category: 'HARM_CATEGORY_HARASSMENT',
          threshold: 'BLOCK_NONE',
        },
        {
          category: 'HARM_CATEGORY_HATE_SPEECH',
          threshold: 'BLOCK_NONE',
        },
        {
          category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
          threshold: 'BLOCK_NONE',
        },
        {
          category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
          threshold: 'BLOCK_NONE',
        },
      ],
    };
  }

  /**
   * Maps internal model names to Google endpoint model ids; unknown names
   * pass through unchanged.
   */
  private mapToGoogleModel(model: LLMModel): string {
    const modelMap: Record<string, string> = {
      'gemini-pro': 'gemini-pro',
      'gemini-pro-vision': 'gemini-pro-vision',
      'palm-2': 'text-bison-001',
      'bison': 'text-bison-001',
    };
    return modelMap[model] || model;
  }

  /**
   * Normalizes Google finish reasons to the provider-agnostic set.
   * Unrecognized reasons default to 'stop'.
   */
  private mapFinishReason(reason: string): 'stop' | 'length' | 'content_filter' {
    switch (reason) {
      case 'STOP':
        return 'stop';
      case 'MAX_TOKENS':
        return 'length';
      case 'SAFETY':
      case 'RECITATION':
        return 'content_filter';
      default:
        return 'stop';
    }
  }

  /** Human-readable blurb for getModelInfo; generic fallback for unknown models. */
  private getModelDescription(model: LLMModel): string {
    const descriptions: Record<string, string> = {
      'gemini-pro': 'Google\'s most capable text model',
      'gemini-pro-vision': 'Gemini Pro with vision capabilities',
      'palm-2': 'Previous generation large language model',
      'bison': 'Efficient model for various tasks',
    };
    return descriptions[model] || 'Google AI language model';
  }

  /**
   * Converts a non-2xx HTTP response into a typed error and throws it:
   * 401/403 → AuthenticationError, 429 → RateLimitError, everything else →
   * LLMProviderError (retryable when status >= 500). Non-JSON bodies are
   * wrapped so the raw text still surfaces in the message.
   */
  private async handleErrorResponse(response: Response): Promise<void> {
    const errorText = await response.text();
    let errorData: any;

    try {
      errorData = JSON.parse(errorText);
    } catch {
      errorData = { error: { message: errorText } };
    }

    const message = errorData.error?.message || 'Unknown error';

    switch (response.status) {
      case 401:
      case 403:
        throw new AuthenticationError(message, 'google', errorData);
      case 429:
        throw new RateLimitError(message, 'google', undefined, errorData);
      default:
        throw new LLMProviderError(
          message,
          `GOOGLE_${response.status}`,
          'google',
          response.status,
          response.status >= 500,
          errorData
        );
    }
  }
}
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Multi-LLM Provider System
|
|
3
|
+
* Export all provider types and implementations
|
|
4
|
+
*/
|
|
5
|
+
|
|
6
|
+
// Export types
|
|
7
|
+
export * from './types.js';
|
|
8
|
+
|
|
9
|
+
// Export providers
|
|
10
|
+
export { BaseProvider } from './base-provider.js';
|
|
11
|
+
export { AnthropicProvider } from './anthropic-provider.js';
|
|
12
|
+
export { OpenAIProvider } from './openai-provider.js';
|
|
13
|
+
export { GoogleProvider } from './google-provider.js';
|
|
14
|
+
export { CohereProvider } from './cohere-provider.js';
|
|
15
|
+
export { OllamaProvider } from './ollama-provider.js';
|
|
16
|
+
|
|
17
|
+
// Export manager
|
|
18
|
+
export { ProviderManager, ProviderManagerConfig } from './provider-manager.js';
|
|
19
|
+
|
|
20
|
+
// Export utility functions
|
|
21
|
+
export { createProviderManager, getDefaultProviderConfig } from './utils.js';
|