@sparkleideas/providers 3.0.0-alpha.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +574 -0
- package/package.json +70 -0
- package/src/__tests__/provider-integration.test.ts +446 -0
- package/src/__tests__/quick-test.ts +356 -0
- package/src/anthropic-provider.ts +435 -0
- package/src/base-provider.ts +596 -0
- package/src/cohere-provider.ts +423 -0
- package/src/google-provider.ts +429 -0
- package/src/index.ts +40 -0
- package/src/ollama-provider.ts +408 -0
- package/src/openai-provider.ts +490 -0
- package/src/provider-manager.ts +538 -0
- package/src/ruvector-provider.ts +721 -0
- package/src/types.ts +435 -0
|
@@ -0,0 +1,446 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Provider Integration Tests
|
|
3
|
+
*
|
|
4
|
+
* Tests all LLM providers with actual API calls using .env credentials
|
|
5
|
+
*
|
|
6
|
+
* Run with: npx vitest run src/__tests__/provider-integration.test.ts
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
|
|
10
|
+
import { config } from 'dotenv';
|
|
11
|
+
import { resolve } from 'path';
|
|
12
|
+
|
|
13
|
+
// Load .env from project root
|
|
14
|
+
config({ path: resolve(__dirname, '../../../../../.env') });
|
|
15
|
+
|
|
16
|
+
import {
|
|
17
|
+
AnthropicProvider,
|
|
18
|
+
OpenAIProvider,
|
|
19
|
+
GoogleProvider,
|
|
20
|
+
OllamaProvider,
|
|
21
|
+
RuVectorProvider,
|
|
22
|
+
ProviderManager,
|
|
23
|
+
createProviderManager,
|
|
24
|
+
LLMRequest,
|
|
25
|
+
LLMProviderConfig,
|
|
26
|
+
ProviderManagerConfig,
|
|
27
|
+
} from '../index.js';
|
|
28
|
+
import { BaseProviderOptions, consoleLogger } from '../base-provider.js';
|
|
29
|
+
|
|
30
|
+
// Test configuration
|
|
31
|
+
// Prompt shared by every provider smoke test; deliberately short to keep token cost low
// and deterministic enough (with temperature 0.1) for a stable sanity check.
const TEST_PROMPT = 'Say "Hello from Claude Flow V3!" in exactly 5 words.';
// Single-turn conversation wrapping TEST_PROMPT in the shared LLMRequest message shape.
const TEST_MESSAGES: LLMRequest['messages'] = [
  { role: 'user', content: TEST_PROMPT }
];
|
|
35
|
+
|
|
36
|
+
// Simple test request
|
|
37
|
+
const createTestRequest = (model?: string): LLMRequest => ({
|
|
38
|
+
messages: TEST_MESSAGES,
|
|
39
|
+
model,
|
|
40
|
+
maxTokens: 50,
|
|
41
|
+
temperature: 0.1,
|
|
42
|
+
requestId: `test-${Date.now()}`,
|
|
43
|
+
});
|
|
44
|
+
|
|
45
|
+
describe('Provider Integration Tests', () => {
|
|
46
|
+
|
|
47
|
+
describe('Anthropic Provider', () => {
|
|
48
|
+
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
49
|
+
|
|
50
|
+
it.skipIf(!apiKey)('should complete request with Claude 3.5 Sonnet', async () => {
|
|
51
|
+
const provider = new AnthropicProvider({
|
|
52
|
+
config: {
|
|
53
|
+
provider: 'anthropic',
|
|
54
|
+
apiKey,
|
|
55
|
+
model: 'claude-3-5-sonnet-latest',
|
|
56
|
+
maxTokens: 100,
|
|
57
|
+
},
|
|
58
|
+
logger: consoleLogger,
|
|
59
|
+
});
|
|
60
|
+
|
|
61
|
+
await provider.initialize();
|
|
62
|
+
|
|
63
|
+
const response = await provider.complete(createTestRequest());
|
|
64
|
+
|
|
65
|
+
console.log('Anthropic Response:', response.content);
|
|
66
|
+
console.log('Usage:', response.usage);
|
|
67
|
+
console.log('Cost:', response.cost);
|
|
68
|
+
|
|
69
|
+
expect(response.content).toBeTruthy();
|
|
70
|
+
expect(response.provider).toBe('anthropic');
|
|
71
|
+
expect(response.usage.totalTokens).toBeGreaterThan(0);
|
|
72
|
+
|
|
73
|
+
provider.destroy();
|
|
74
|
+
}, 30000);
|
|
75
|
+
|
|
76
|
+
it.skipIf(!apiKey)('should stream response', async () => {
|
|
77
|
+
const provider = new AnthropicProvider({
|
|
78
|
+
config: {
|
|
79
|
+
provider: 'anthropic',
|
|
80
|
+
apiKey,
|
|
81
|
+
model: 'claude-3-5-sonnet-latest',
|
|
82
|
+
maxTokens: 100,
|
|
83
|
+
},
|
|
84
|
+
logger: consoleLogger,
|
|
85
|
+
});
|
|
86
|
+
|
|
87
|
+
await provider.initialize();
|
|
88
|
+
|
|
89
|
+
const chunks: string[] = [];
|
|
90
|
+
for await (const event of provider.streamComplete(createTestRequest())) {
|
|
91
|
+
if (event.type === 'content' && event.delta?.content) {
|
|
92
|
+
chunks.push(event.delta.content);
|
|
93
|
+
process.stdout.write(event.delta.content);
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
console.log('\n');
|
|
97
|
+
|
|
98
|
+
expect(chunks.length).toBeGreaterThan(0);
|
|
99
|
+
|
|
100
|
+
provider.destroy();
|
|
101
|
+
}, 30000);
|
|
102
|
+
});
|
|
103
|
+
|
|
104
|
+
describe('Google Gemini Provider', () => {
|
|
105
|
+
const apiKey = process.env.GOOGLE_GEMINI_API_KEY;
|
|
106
|
+
|
|
107
|
+
it.skipIf(!apiKey)('should complete request with Gemini 2.0 Flash', async () => {
|
|
108
|
+
const provider = new GoogleProvider({
|
|
109
|
+
config: {
|
|
110
|
+
provider: 'google',
|
|
111
|
+
apiKey,
|
|
112
|
+
model: 'gemini-2.0-flash',
|
|
113
|
+
maxTokens: 100,
|
|
114
|
+
},
|
|
115
|
+
logger: consoleLogger,
|
|
116
|
+
});
|
|
117
|
+
|
|
118
|
+
await provider.initialize();
|
|
119
|
+
|
|
120
|
+
const response = await provider.complete(createTestRequest());
|
|
121
|
+
|
|
122
|
+
console.log('Google Response:', response.content);
|
|
123
|
+
console.log('Usage:', response.usage);
|
|
124
|
+
console.log('Cost:', response.cost);
|
|
125
|
+
|
|
126
|
+
expect(response.content).toBeTruthy();
|
|
127
|
+
expect(response.provider).toBe('google');
|
|
128
|
+
|
|
129
|
+
provider.destroy();
|
|
130
|
+
}, 30000);
|
|
131
|
+
});
|
|
132
|
+
|
|
133
|
+
describe('OpenRouter Provider (OpenAI Compatible)', () => {
|
|
134
|
+
const apiKey = process.env.OPENROUTER_API_KEY;
|
|
135
|
+
|
|
136
|
+
it.skipIf(!apiKey)('should complete request via OpenRouter', async () => {
|
|
137
|
+
const provider = new OpenAIProvider({
|
|
138
|
+
config: {
|
|
139
|
+
provider: 'openai',
|
|
140
|
+
apiKey,
|
|
141
|
+
apiUrl: 'https://openrouter.ai/api/v1',
|
|
142
|
+
model: 'openai/gpt-4o-mini',
|
|
143
|
+
maxTokens: 100,
|
|
144
|
+
providerOptions: {
|
|
145
|
+
headers: {
|
|
146
|
+
'HTTP-Referer': 'https://claude-flow.dev',
|
|
147
|
+
'X-Title': 'Claude Flow V3 Test',
|
|
148
|
+
},
|
|
149
|
+
},
|
|
150
|
+
},
|
|
151
|
+
logger: consoleLogger,
|
|
152
|
+
});
|
|
153
|
+
|
|
154
|
+
await provider.initialize();
|
|
155
|
+
|
|
156
|
+
const response = await provider.complete(createTestRequest('openai/gpt-4o-mini'));
|
|
157
|
+
|
|
158
|
+
console.log('OpenRouter Response:', response.content);
|
|
159
|
+
console.log('Usage:', response.usage);
|
|
160
|
+
|
|
161
|
+
expect(response.content).toBeTruthy();
|
|
162
|
+
|
|
163
|
+
provider.destroy();
|
|
164
|
+
}, 30000);
|
|
165
|
+
});
|
|
166
|
+
|
|
167
|
+
describe('Ollama Provider (Local)', () => {
|
|
168
|
+
const ollamaUrl = process.env.OLLAMA_URL || 'http://localhost:11434';
|
|
169
|
+
|
|
170
|
+
it.skip('should complete request with local model', async () => {
|
|
171
|
+
const provider = new OllamaProvider({
|
|
172
|
+
config: {
|
|
173
|
+
provider: 'ollama',
|
|
174
|
+
apiUrl: ollamaUrl,
|
|
175
|
+
model: 'llama3.2',
|
|
176
|
+
maxTokens: 100,
|
|
177
|
+
},
|
|
178
|
+
logger: consoleLogger,
|
|
179
|
+
});
|
|
180
|
+
|
|
181
|
+
try {
|
|
182
|
+
await provider.initialize();
|
|
183
|
+
|
|
184
|
+
const response = await provider.complete(createTestRequest());
|
|
185
|
+
|
|
186
|
+
console.log('Ollama Response:', response.content);
|
|
187
|
+
console.log('Usage:', response.usage);
|
|
188
|
+
|
|
189
|
+
expect(response.content).toBeTruthy();
|
|
190
|
+
expect(response.provider).toBe('ollama');
|
|
191
|
+
|
|
192
|
+
provider.destroy();
|
|
193
|
+
} catch (error) {
|
|
194
|
+
console.log('Ollama not available locally, skipping test');
|
|
195
|
+
}
|
|
196
|
+
}, 60000);
|
|
197
|
+
});
|
|
198
|
+
|
|
199
|
+
describe('RuVector Provider (ruvllm)', () => {
|
|
200
|
+
|
|
201
|
+
it('should complete request with CPU-friendly Qwen model', async () => {
|
|
202
|
+
const provider = new RuVectorProvider({
|
|
203
|
+
config: {
|
|
204
|
+
provider: 'ruvector',
|
|
205
|
+
model: 'qwen2.5:0.5b', // CPU-friendly small Qwen model
|
|
206
|
+
maxTokens: 100,
|
|
207
|
+
providerOptions: {
|
|
208
|
+
// RuVector-specific options
|
|
209
|
+
sonaEnabled: true,
|
|
210
|
+
hnswEnabled: true,
|
|
211
|
+
fastgrnnEnabled: true,
|
|
212
|
+
// Local model settings
|
|
213
|
+
localModel: 'qwen2.5:0.5b',
|
|
214
|
+
ollamaUrl: 'http://localhost:11434',
|
|
215
|
+
},
|
|
216
|
+
},
|
|
217
|
+
logger: consoleLogger,
|
|
218
|
+
});
|
|
219
|
+
|
|
220
|
+
try {
|
|
221
|
+
await provider.initialize();
|
|
222
|
+
|
|
223
|
+
const response = await provider.complete(createTestRequest('qwen2.5:0.5b'));
|
|
224
|
+
|
|
225
|
+
console.log('RuVector Response:', response.content);
|
|
226
|
+
console.log('Usage:', response.usage);
|
|
227
|
+
console.log('Cost:', response.cost);
|
|
228
|
+
|
|
229
|
+
// Check SONA metrics
|
|
230
|
+
const sonaMetrics = await provider.getSonaMetrics();
|
|
231
|
+
console.log('SONA Metrics:', sonaMetrics);
|
|
232
|
+
|
|
233
|
+
expect(response.content).toBeTruthy();
|
|
234
|
+
|
|
235
|
+
provider.destroy();
|
|
236
|
+
} catch (error) {
|
|
237
|
+
console.log('RuVector/Ollama not available, test details:', error);
|
|
238
|
+
// Don't fail - local models may not be running
|
|
239
|
+
}
|
|
240
|
+
}, 120000);
|
|
241
|
+
|
|
242
|
+
it('should search memory with HNSW', async () => {
|
|
243
|
+
const provider = new RuVectorProvider({
|
|
244
|
+
config: {
|
|
245
|
+
provider: 'ruvector',
|
|
246
|
+
model: 'qwen2.5:0.5b',
|
|
247
|
+
maxTokens: 100,
|
|
248
|
+
providerOptions: {
|
|
249
|
+
hnswEnabled: true,
|
|
250
|
+
},
|
|
251
|
+
},
|
|
252
|
+
logger: consoleLogger,
|
|
253
|
+
});
|
|
254
|
+
|
|
255
|
+
try {
|
|
256
|
+
await provider.initialize();
|
|
257
|
+
|
|
258
|
+
// Search memory
|
|
259
|
+
const results = await provider.searchMemory('test query', 5);
|
|
260
|
+
console.log('Memory search results:', results);
|
|
261
|
+
|
|
262
|
+
expect(Array.isArray(results)).toBe(true);
|
|
263
|
+
|
|
264
|
+
provider.destroy();
|
|
265
|
+
} catch (error) {
|
|
266
|
+
console.log('Memory search not available:', error);
|
|
267
|
+
}
|
|
268
|
+
}, 30000);
|
|
269
|
+
});
|
|
270
|
+
|
|
271
|
+
describe('Provider Manager', () => {
|
|
272
|
+
const anthropicKey = process.env.ANTHROPIC_API_KEY;
|
|
273
|
+
const googleKey = process.env.GOOGLE_GEMINI_API_KEY;
|
|
274
|
+
|
|
275
|
+
it.skipIf(!anthropicKey && !googleKey)('should manage multiple providers with failover', async () => {
|
|
276
|
+
const providers: LLMProviderConfig[] = [];
|
|
277
|
+
|
|
278
|
+
if (anthropicKey) {
|
|
279
|
+
providers.push({
|
|
280
|
+
provider: 'anthropic',
|
|
281
|
+
apiKey: anthropicKey,
|
|
282
|
+
model: 'claude-3-5-sonnet-latest',
|
|
283
|
+
maxTokens: 100,
|
|
284
|
+
});
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
if (googleKey) {
|
|
288
|
+
providers.push({
|
|
289
|
+
provider: 'google',
|
|
290
|
+
apiKey: googleKey,
|
|
291
|
+
model: 'gemini-2.0-flash',
|
|
292
|
+
maxTokens: 100,
|
|
293
|
+
});
|
|
294
|
+
}
|
|
295
|
+
|
|
296
|
+
const config: ProviderManagerConfig = {
|
|
297
|
+
providers,
|
|
298
|
+
loadBalancing: {
|
|
299
|
+
enabled: true,
|
|
300
|
+
strategy: 'round-robin',
|
|
301
|
+
},
|
|
302
|
+
fallback: {
|
|
303
|
+
enabled: true,
|
|
304
|
+
maxAttempts: 2,
|
|
305
|
+
},
|
|
306
|
+
cache: {
|
|
307
|
+
enabled: true,
|
|
308
|
+
ttl: 60000,
|
|
309
|
+
maxSize: 100,
|
|
310
|
+
},
|
|
311
|
+
};
|
|
312
|
+
|
|
313
|
+
const manager = await createProviderManager(config, consoleLogger);
|
|
314
|
+
|
|
315
|
+
// List providers
|
|
316
|
+
const providerList = manager.listProviders();
|
|
317
|
+
console.log('Active providers:', providerList);
|
|
318
|
+
expect(providerList.length).toBeGreaterThan(0);
|
|
319
|
+
|
|
320
|
+
// Complete request
|
|
321
|
+
const response = await manager.complete(createTestRequest());
|
|
322
|
+
console.log('Manager Response:', response.content);
|
|
323
|
+
console.log('Provider used:', response.provider);
|
|
324
|
+
|
|
325
|
+
expect(response.content).toBeTruthy();
|
|
326
|
+
|
|
327
|
+
// Health check all
|
|
328
|
+
const health = await manager.healthCheck();
|
|
329
|
+
console.log('Health status:', Object.fromEntries(health));
|
|
330
|
+
|
|
331
|
+
// Get metrics
|
|
332
|
+
const metrics = manager.getMetrics();
|
|
333
|
+
console.log('Metrics:', Object.fromEntries(metrics));
|
|
334
|
+
|
|
335
|
+
manager.destroy();
|
|
336
|
+
}, 60000);
|
|
337
|
+
|
|
338
|
+
it.skipIf(!anthropicKey)('should use cache for repeated requests', async () => {
|
|
339
|
+
const manager = await createProviderManager({
|
|
340
|
+
providers: [{
|
|
341
|
+
provider: 'anthropic',
|
|
342
|
+
apiKey: anthropicKey,
|
|
343
|
+
model: 'claude-3-5-sonnet-latest',
|
|
344
|
+
maxTokens: 50,
|
|
345
|
+
}],
|
|
346
|
+
cache: {
|
|
347
|
+
enabled: true,
|
|
348
|
+
ttl: 60000,
|
|
349
|
+
maxSize: 100,
|
|
350
|
+
},
|
|
351
|
+
}, consoleLogger);
|
|
352
|
+
|
|
353
|
+
const request = createTestRequest();
|
|
354
|
+
|
|
355
|
+
// First request - no cache
|
|
356
|
+
const start1 = Date.now();
|
|
357
|
+
const response1 = await manager.complete(request);
|
|
358
|
+
const time1 = Date.now() - start1;
|
|
359
|
+
console.log(`First request: ${time1}ms`);
|
|
360
|
+
|
|
361
|
+
// Second request - should hit cache
|
|
362
|
+
const start2 = Date.now();
|
|
363
|
+
const response2 = await manager.complete(request);
|
|
364
|
+
const time2 = Date.now() - start2;
|
|
365
|
+
console.log(`Second request (cached): ${time2}ms`);
|
|
366
|
+
|
|
367
|
+
expect(response1.content).toBe(response2.content);
|
|
368
|
+
expect(time2).toBeLessThan(time1); // Cache should be faster
|
|
369
|
+
|
|
370
|
+
manager.destroy();
|
|
371
|
+
}, 60000);
|
|
372
|
+
});
|
|
373
|
+
|
|
374
|
+
describe('Cost Estimation', () => {
|
|
375
|
+
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
376
|
+
|
|
377
|
+
it.skipIf(!apiKey)('should estimate costs accurately', async () => {
|
|
378
|
+
const manager = await createProviderManager({
|
|
379
|
+
providers: [{
|
|
380
|
+
provider: 'anthropic',
|
|
381
|
+
apiKey,
|
|
382
|
+
model: 'claude-3-5-sonnet-latest',
|
|
383
|
+
maxTokens: 100,
|
|
384
|
+
}],
|
|
385
|
+
}, consoleLogger);
|
|
386
|
+
|
|
387
|
+
const request = createTestRequest();
|
|
388
|
+
|
|
389
|
+
// Get cost estimates
|
|
390
|
+
const estimates = await manager.estimateCost(request);
|
|
391
|
+
console.log('Cost estimates:', Object.fromEntries(estimates));
|
|
392
|
+
|
|
393
|
+
// Make actual request
|
|
394
|
+
const response = await manager.complete(request);
|
|
395
|
+
console.log('Actual cost:', response.cost);
|
|
396
|
+
|
|
397
|
+
// Compare estimate to actual
|
|
398
|
+
const estimate = estimates.get('anthropic');
|
|
399
|
+
if (estimate && response.cost) {
|
|
400
|
+
const estimateTotal = estimate.estimatedCost.total;
|
|
401
|
+
const actualTotal = response.cost.totalCost;
|
|
402
|
+
const accuracy = 1 - Math.abs(estimateTotal - actualTotal) / actualTotal;
|
|
403
|
+
console.log(`Estimation accuracy: ${(accuracy * 100).toFixed(1)}%`);
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
manager.destroy();
|
|
407
|
+
}, 30000);
|
|
408
|
+
});
|
|
409
|
+
});
|
|
410
|
+
|
|
411
|
+
// Quick standalone test runner
|
|
412
|
+
async function runQuickTest() {
|
|
413
|
+
console.log('\n=== Quick Provider Test ===\n');
|
|
414
|
+
|
|
415
|
+
const apiKey = process.env.ANTHROPIC_API_KEY;
|
|
416
|
+
if (!apiKey) {
|
|
417
|
+
console.log('No ANTHROPIC_API_KEY found in .env');
|
|
418
|
+
return;
|
|
419
|
+
}
|
|
420
|
+
|
|
421
|
+
const provider = new AnthropicProvider({
|
|
422
|
+
config: {
|
|
423
|
+
provider: 'anthropic',
|
|
424
|
+
apiKey,
|
|
425
|
+
model: 'claude-3-5-sonnet-latest',
|
|
426
|
+
maxTokens: 100,
|
|
427
|
+
},
|
|
428
|
+
logger: consoleLogger,
|
|
429
|
+
});
|
|
430
|
+
|
|
431
|
+
await provider.initialize();
|
|
432
|
+
|
|
433
|
+
const response = await provider.complete({
|
|
434
|
+
messages: [{ role: 'user', content: 'What is 2+2? Reply with just the number.' }],
|
|
435
|
+
maxTokens: 10,
|
|
436
|
+
});
|
|
437
|
+
|
|
438
|
+
console.log('Response:', response.content);
|
|
439
|
+
console.log('Tokens:', response.usage);
|
|
440
|
+
console.log('Cost:', response.cost);
|
|
441
|
+
|
|
442
|
+
provider.destroy();
|
|
443
|
+
}
|
|
444
|
+
|
|
445
|
+
// Export for direct execution
|
|
446
|
+
export { runQuickTest };
|