@sparkleideas/providers 3.5.2-patch.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +574 -0
- package/package.json +70 -0
- package/src/__tests__/provider-integration.test.ts +446 -0
- package/src/__tests__/quick-test.ts +356 -0
- package/src/anthropic-provider.ts +435 -0
- package/src/base-provider.ts +596 -0
- package/src/cohere-provider.ts +423 -0
- package/src/google-provider.ts +429 -0
- package/src/index.ts +40 -0
- package/src/ollama-provider.ts +408 -0
- package/src/openai-provider.ts +490 -0
- package/src/provider-manager.ts +538 -0
- package/src/ruvector-provider.ts +721 -0
- package/src/types.ts +435 -0
|
@@ -0,0 +1,429 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* V3 Google (Gemini) Provider
|
|
3
|
+
*
|
|
4
|
+
* Supports Gemini 2.0, 1.5 Pro, and Flash models.
|
|
5
|
+
*
|
|
6
|
+
* @module @sparkleideas/providers/google-provider
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { BaseProvider, BaseProviderOptions } from './base-provider.js';
|
|
10
|
+
import {
|
|
11
|
+
LLMProvider,
|
|
12
|
+
LLMModel,
|
|
13
|
+
LLMRequest,
|
|
14
|
+
LLMResponse,
|
|
15
|
+
LLMStreamEvent,
|
|
16
|
+
ModelInfo,
|
|
17
|
+
ProviderCapabilities,
|
|
18
|
+
HealthCheckResult,
|
|
19
|
+
AuthenticationError,
|
|
20
|
+
RateLimitError,
|
|
21
|
+
LLMProviderError,
|
|
22
|
+
} from './types.js';
|
|
23
|
+
|
|
24
|
+
/**
 * JSON body accepted by the Gemini `generateContent` / `streamGenerateContent`
 * REST endpoints, as built by {@link GoogleProvider}.
 */
interface GeminiRequest {
  // Conversation turns. Gemini uses 'model' where other APIs use 'assistant'.
  contents: Array<{
    role: 'user' | 'model';
    parts: Array<{ text: string }>;
  }>;
  // System prompt, carried separately from the conversation turns.
  systemInstruction?: {
    parts: Array<{ text: string }>;
  };
  // Sampling / length controls; only populated when the caller or config sets them.
  generationConfig?: {
    temperature?: number;
    topP?: number;
    topK?: number;
    maxOutputTokens?: number;
    stopSequences?: string[];
  };
  // Tool (function-calling) declarations; `parameters` is a JSON-schema-like
  // object passed through verbatim from the caller's tool definitions.
  tools?: Array<{
    functionDeclarations: Array<{
      name: string;
      description: string;
      parameters: unknown;
    }>;
  }>;
}
|
|
47
|
+
|
|
48
|
+
/**
 * Subset of the Gemini `generateContent` response (and of each SSE stream
 * chunk) that this provider actually consumes.
 */
interface GeminiResponse {
  candidates: Array<{
    content: {
      // A part carries either plain text or a function (tool) call.
      parts: Array<{ text?: string; functionCall?: { name: string; args: unknown } }>;
      role: string;
    };
    // e.g. 'STOP', 'MAX_TOKENS' — mapped to the provider-neutral finish reason.
    finishReason: string;
  }>;
  // Token accounting reported by the API; used for usage and cost reporting.
  usageMetadata: {
    promptTokenCount: number;
    candidatesTokenCount: number;
    totalTokenCount: number;
  };
}
|
|
62
|
+
|
|
63
|
+
export class GoogleProvider extends BaseProvider {
|
|
64
|
+
readonly name: LLMProvider = 'google';
|
|
65
|
+
readonly capabilities: ProviderCapabilities = {
|
|
66
|
+
supportedModels: [
|
|
67
|
+
'gemini-2.0-flash',
|
|
68
|
+
'gemini-1.5-pro',
|
|
69
|
+
'gemini-1.5-flash',
|
|
70
|
+
'gemini-pro',
|
|
71
|
+
],
|
|
72
|
+
maxContextLength: {
|
|
73
|
+
'gemini-2.0-flash': 1000000,
|
|
74
|
+
'gemini-1.5-pro': 2000000,
|
|
75
|
+
'gemini-1.5-flash': 1000000,
|
|
76
|
+
'gemini-pro': 32000,
|
|
77
|
+
},
|
|
78
|
+
maxOutputTokens: {
|
|
79
|
+
'gemini-2.0-flash': 8192,
|
|
80
|
+
'gemini-1.5-pro': 8192,
|
|
81
|
+
'gemini-1.5-flash': 8192,
|
|
82
|
+
'gemini-pro': 8192,
|
|
83
|
+
},
|
|
84
|
+
supportsStreaming: true,
|
|
85
|
+
supportsToolCalling: true,
|
|
86
|
+
supportsSystemMessages: true,
|
|
87
|
+
supportsVision: true,
|
|
88
|
+
supportsAudio: true,
|
|
89
|
+
supportsFineTuning: false,
|
|
90
|
+
supportsEmbeddings: true,
|
|
91
|
+
supportsBatching: true,
|
|
92
|
+
rateLimit: {
|
|
93
|
+
requestsPerMinute: 1000,
|
|
94
|
+
tokensPerMinute: 4000000,
|
|
95
|
+
concurrentRequests: 100,
|
|
96
|
+
},
|
|
97
|
+
pricing: {
|
|
98
|
+
'gemini-2.0-flash': {
|
|
99
|
+
promptCostPer1k: 0.0, // Free tier available
|
|
100
|
+
completionCostPer1k: 0.0,
|
|
101
|
+
currency: 'USD',
|
|
102
|
+
},
|
|
103
|
+
'gemini-1.5-pro': {
|
|
104
|
+
promptCostPer1k: 0.00125,
|
|
105
|
+
completionCostPer1k: 0.005,
|
|
106
|
+
currency: 'USD',
|
|
107
|
+
},
|
|
108
|
+
'gemini-1.5-flash': {
|
|
109
|
+
promptCostPer1k: 0.000075,
|
|
110
|
+
completionCostPer1k: 0.0003,
|
|
111
|
+
currency: 'USD',
|
|
112
|
+
},
|
|
113
|
+
'gemini-pro': {
|
|
114
|
+
promptCostPer1k: 0.0005,
|
|
115
|
+
completionCostPer1k: 0.0015,
|
|
116
|
+
currency: 'USD',
|
|
117
|
+
},
|
|
118
|
+
},
|
|
119
|
+
};
|
|
120
|
+
|
|
121
|
+
private baseUrl: string = 'https://generativelanguage.googleapis.com/v1beta';
|
|
122
|
+
|
|
123
|
+
constructor(options: BaseProviderOptions) {
|
|
124
|
+
super(options);
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
protected async doInitialize(): Promise<void> {
|
|
128
|
+
if (!this.config.apiKey) {
|
|
129
|
+
throw new AuthenticationError('Google API key is required', 'google');
|
|
130
|
+
}
|
|
131
|
+
|
|
132
|
+
this.baseUrl = this.config.apiUrl || 'https://generativelanguage.googleapis.com/v1beta';
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
protected async doComplete(request: LLMRequest): Promise<LLMResponse> {
|
|
136
|
+
const geminiRequest = this.buildRequest(request);
|
|
137
|
+
const model = request.model || this.config.model;
|
|
138
|
+
const url = `${this.baseUrl}/models/${model}:generateContent?key=${this.config.apiKey}`;
|
|
139
|
+
|
|
140
|
+
const controller = new AbortController();
|
|
141
|
+
const timeout = setTimeout(() => controller.abort(), this.config.timeout || 60000);
|
|
142
|
+
|
|
143
|
+
try {
|
|
144
|
+
const response = await fetch(url, {
|
|
145
|
+
method: 'POST',
|
|
146
|
+
headers: { 'Content-Type': 'application/json' },
|
|
147
|
+
body: JSON.stringify(geminiRequest),
|
|
148
|
+
signal: controller.signal,
|
|
149
|
+
});
|
|
150
|
+
|
|
151
|
+
clearTimeout(timeout);
|
|
152
|
+
|
|
153
|
+
if (!response.ok) {
|
|
154
|
+
await this.handleErrorResponse(response);
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
const data = await response.json() as GeminiResponse;
|
|
158
|
+
return this.transformResponse(data, request);
|
|
159
|
+
} catch (error) {
|
|
160
|
+
clearTimeout(timeout);
|
|
161
|
+
throw this.transformError(error);
|
|
162
|
+
}
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
protected async *doStreamComplete(request: LLMRequest): AsyncIterable<LLMStreamEvent> {
|
|
166
|
+
const geminiRequest = this.buildRequest(request);
|
|
167
|
+
const model = request.model || this.config.model;
|
|
168
|
+
const url = `${this.baseUrl}/models/${model}:streamGenerateContent?key=${this.config.apiKey}&alt=sse`;
|
|
169
|
+
|
|
170
|
+
const controller = new AbortController();
|
|
171
|
+
const timeout = setTimeout(() => controller.abort(), (this.config.timeout || 60000) * 2);
|
|
172
|
+
|
|
173
|
+
try {
|
|
174
|
+
const response = await fetch(url, {
|
|
175
|
+
method: 'POST',
|
|
176
|
+
headers: { 'Content-Type': 'application/json' },
|
|
177
|
+
body: JSON.stringify(geminiRequest),
|
|
178
|
+
signal: controller.signal,
|
|
179
|
+
});
|
|
180
|
+
|
|
181
|
+
if (!response.ok) {
|
|
182
|
+
await this.handleErrorResponse(response);
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
const reader = response.body!.getReader();
|
|
186
|
+
const decoder = new TextDecoder();
|
|
187
|
+
let buffer = '';
|
|
188
|
+
let totalTokens = 0;
|
|
189
|
+
|
|
190
|
+
while (true) {
|
|
191
|
+
const { done, value } = await reader.read();
|
|
192
|
+
if (done) break;
|
|
193
|
+
|
|
194
|
+
buffer += decoder.decode(value, { stream: true });
|
|
195
|
+
const lines = buffer.split('\n');
|
|
196
|
+
buffer = lines.pop() || '';
|
|
197
|
+
|
|
198
|
+
for (const line of lines) {
|
|
199
|
+
if (line.startsWith('data: ')) {
|
|
200
|
+
const data = line.slice(6);
|
|
201
|
+
if (!data || data === '[DONE]') continue;
|
|
202
|
+
|
|
203
|
+
try {
|
|
204
|
+
const chunk: GeminiResponse = JSON.parse(data);
|
|
205
|
+
const candidate = chunk.candidates?.[0];
|
|
206
|
+
|
|
207
|
+
if (candidate?.content?.parts) {
|
|
208
|
+
for (const part of candidate.content.parts) {
|
|
209
|
+
if (part.text) {
|
|
210
|
+
yield {
|
|
211
|
+
type: 'content',
|
|
212
|
+
delta: { content: part.text },
|
|
213
|
+
};
|
|
214
|
+
}
|
|
215
|
+
}
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
if (chunk.usageMetadata) {
|
|
219
|
+
totalTokens = chunk.usageMetadata.totalTokenCount;
|
|
220
|
+
}
|
|
221
|
+
} catch {
|
|
222
|
+
// Ignore parse errors
|
|
223
|
+
}
|
|
224
|
+
}
|
|
225
|
+
}
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
// Final event
|
|
229
|
+
const pricing = this.capabilities.pricing[model];
|
|
230
|
+
const promptTokens = this.estimateTokens(JSON.stringify(request.messages));
|
|
231
|
+
|
|
232
|
+
yield {
|
|
233
|
+
type: 'done',
|
|
234
|
+
usage: {
|
|
235
|
+
promptTokens,
|
|
236
|
+
completionTokens: totalTokens - promptTokens,
|
|
237
|
+
totalTokens,
|
|
238
|
+
},
|
|
239
|
+
cost: {
|
|
240
|
+
promptCost: (promptTokens / 1000) * pricing.promptCostPer1k,
|
|
241
|
+
completionCost: ((totalTokens - promptTokens) / 1000) * pricing.completionCostPer1k,
|
|
242
|
+
totalCost:
|
|
243
|
+
(promptTokens / 1000) * pricing.promptCostPer1k +
|
|
244
|
+
((totalTokens - promptTokens) / 1000) * pricing.completionCostPer1k,
|
|
245
|
+
currency: 'USD',
|
|
246
|
+
},
|
|
247
|
+
};
|
|
248
|
+
} catch (error) {
|
|
249
|
+
clearTimeout(timeout);
|
|
250
|
+
throw this.transformError(error);
|
|
251
|
+
} finally {
|
|
252
|
+
clearTimeout(timeout);
|
|
253
|
+
}
|
|
254
|
+
}
|
|
255
|
+
|
|
256
|
+
async listModels(): Promise<LLMModel[]> {
|
|
257
|
+
return this.capabilities.supportedModels;
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
async getModelInfo(model: LLMModel): Promise<ModelInfo> {
|
|
261
|
+
const descriptions: Record<string, string> = {
|
|
262
|
+
'gemini-2.0-flash': 'Latest Gemini 2.0 with multimodal capabilities',
|
|
263
|
+
'gemini-1.5-pro': 'Most capable Gemini model with 2M context',
|
|
264
|
+
'gemini-1.5-flash': 'Fast and efficient Gemini model',
|
|
265
|
+
'gemini-pro': 'Balanced Gemini model',
|
|
266
|
+
};
|
|
267
|
+
|
|
268
|
+
return {
|
|
269
|
+
model,
|
|
270
|
+
name: model,
|
|
271
|
+
description: descriptions[model] || 'Google Gemini model',
|
|
272
|
+
contextLength: this.capabilities.maxContextLength[model] || 32000,
|
|
273
|
+
maxOutputTokens: this.capabilities.maxOutputTokens[model] || 8192,
|
|
274
|
+
supportedFeatures: ['chat', 'completion', 'vision', 'audio', 'tool_calling'],
|
|
275
|
+
pricing: this.capabilities.pricing[model],
|
|
276
|
+
};
|
|
277
|
+
}
|
|
278
|
+
|
|
279
|
+
protected async doHealthCheck(): Promise<HealthCheckResult> {
|
|
280
|
+
try {
|
|
281
|
+
const url = `${this.baseUrl}/models?key=${this.config.apiKey}`;
|
|
282
|
+
const response = await fetch(url);
|
|
283
|
+
|
|
284
|
+
return {
|
|
285
|
+
healthy: response.ok,
|
|
286
|
+
timestamp: new Date(),
|
|
287
|
+
...(response.ok ? {} : { error: `HTTP ${response.status}` }),
|
|
288
|
+
};
|
|
289
|
+
} catch (error) {
|
|
290
|
+
return {
|
|
291
|
+
healthy: false,
|
|
292
|
+
error: error instanceof Error ? error.message : 'Unknown error',
|
|
293
|
+
timestamp: new Date(),
|
|
294
|
+
};
|
|
295
|
+
}
|
|
296
|
+
}
|
|
297
|
+
|
|
298
|
+
private buildRequest(request: LLMRequest): GeminiRequest {
|
|
299
|
+
// Extract system message
|
|
300
|
+
const systemMessage = request.messages.find((m) => m.role === 'system');
|
|
301
|
+
const otherMessages = request.messages.filter((m) => m.role !== 'system');
|
|
302
|
+
|
|
303
|
+
// Transform messages
|
|
304
|
+
const contents = otherMessages.map((msg) => ({
|
|
305
|
+
role: msg.role === 'assistant' ? 'model' as const : 'user' as const,
|
|
306
|
+
parts: [{ text: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content) }],
|
|
307
|
+
}));
|
|
308
|
+
|
|
309
|
+
const geminiRequest: GeminiRequest = { contents };
|
|
310
|
+
|
|
311
|
+
if (systemMessage) {
|
|
312
|
+
geminiRequest.systemInstruction = {
|
|
313
|
+
parts: [{
|
|
314
|
+
text: typeof systemMessage.content === 'string'
|
|
315
|
+
? systemMessage.content
|
|
316
|
+
: JSON.stringify(systemMessage.content),
|
|
317
|
+
}],
|
|
318
|
+
};
|
|
319
|
+
}
|
|
320
|
+
|
|
321
|
+
const generationConfig: GeminiRequest['generationConfig'] = {};
|
|
322
|
+
|
|
323
|
+
if (request.temperature !== undefined || this.config.temperature !== undefined) {
|
|
324
|
+
generationConfig.temperature = request.temperature ?? this.config.temperature;
|
|
325
|
+
}
|
|
326
|
+
if (request.topP !== undefined || this.config.topP !== undefined) {
|
|
327
|
+
generationConfig.topP = request.topP ?? this.config.topP;
|
|
328
|
+
}
|
|
329
|
+
if (request.topK !== undefined || this.config.topK !== undefined) {
|
|
330
|
+
generationConfig.topK = request.topK ?? this.config.topK;
|
|
331
|
+
}
|
|
332
|
+
if (request.maxTokens || this.config.maxTokens) {
|
|
333
|
+
generationConfig.maxOutputTokens = request.maxTokens || this.config.maxTokens;
|
|
334
|
+
}
|
|
335
|
+
if (request.stopSequences || this.config.stopSequences) {
|
|
336
|
+
generationConfig.stopSequences = request.stopSequences || this.config.stopSequences;
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
if (Object.keys(generationConfig).length > 0) {
|
|
340
|
+
geminiRequest.generationConfig = generationConfig;
|
|
341
|
+
}
|
|
342
|
+
|
|
343
|
+
if (request.tools) {
|
|
344
|
+
geminiRequest.tools = [{
|
|
345
|
+
functionDeclarations: request.tools.map((tool) => ({
|
|
346
|
+
name: tool.function.name,
|
|
347
|
+
description: tool.function.description,
|
|
348
|
+
parameters: tool.function.parameters,
|
|
349
|
+
})),
|
|
350
|
+
}];
|
|
351
|
+
}
|
|
352
|
+
|
|
353
|
+
return geminiRequest;
|
|
354
|
+
}
|
|
355
|
+
|
|
356
|
+
private transformResponse(data: GeminiResponse, request: LLMRequest): LLMResponse {
|
|
357
|
+
const candidate = data.candidates[0];
|
|
358
|
+
const model = request.model || this.config.model;
|
|
359
|
+
const pricing = this.capabilities.pricing[model];
|
|
360
|
+
|
|
361
|
+
const textParts = candidate.content.parts.filter((p) => p.text);
|
|
362
|
+
const content = textParts.map((p) => p.text).join('');
|
|
363
|
+
|
|
364
|
+
const toolCalls = candidate.content.parts
|
|
365
|
+
.filter((p) => p.functionCall)
|
|
366
|
+
.map((p) => ({
|
|
367
|
+
id: `tool_${Date.now()}`,
|
|
368
|
+
type: 'function' as const,
|
|
369
|
+
function: {
|
|
370
|
+
name: p.functionCall!.name,
|
|
371
|
+
arguments: JSON.stringify(p.functionCall!.args),
|
|
372
|
+
},
|
|
373
|
+
}));
|
|
374
|
+
|
|
375
|
+
const promptCost = (data.usageMetadata.promptTokenCount / 1000) * pricing.promptCostPer1k;
|
|
376
|
+
const completionCost =
|
|
377
|
+
(data.usageMetadata.candidatesTokenCount / 1000) * pricing.completionCostPer1k;
|
|
378
|
+
|
|
379
|
+
return {
|
|
380
|
+
id: `gemini-${Date.now()}`,
|
|
381
|
+
model: model as LLMModel,
|
|
382
|
+
provider: 'google',
|
|
383
|
+
content,
|
|
384
|
+
toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
|
|
385
|
+
usage: {
|
|
386
|
+
promptTokens: data.usageMetadata.promptTokenCount,
|
|
387
|
+
completionTokens: data.usageMetadata.candidatesTokenCount,
|
|
388
|
+
totalTokens: data.usageMetadata.totalTokenCount,
|
|
389
|
+
},
|
|
390
|
+
cost: {
|
|
391
|
+
promptCost,
|
|
392
|
+
completionCost,
|
|
393
|
+
totalCost: promptCost + completionCost,
|
|
394
|
+
currency: 'USD',
|
|
395
|
+
},
|
|
396
|
+
finishReason: candidate.finishReason === 'STOP' ? 'stop' : 'length',
|
|
397
|
+
};
|
|
398
|
+
}
|
|
399
|
+
|
|
400
|
+
private async handleErrorResponse(response: Response): Promise<never> {
|
|
401
|
+
const errorText = await response.text();
|
|
402
|
+
let errorData: { error?: { message?: string } };
|
|
403
|
+
|
|
404
|
+
try {
|
|
405
|
+
errorData = JSON.parse(errorText);
|
|
406
|
+
} catch {
|
|
407
|
+
errorData = { error: { message: errorText } };
|
|
408
|
+
}
|
|
409
|
+
|
|
410
|
+
const message = errorData.error?.message || 'Unknown error';
|
|
411
|
+
|
|
412
|
+
switch (response.status) {
|
|
413
|
+
case 401:
|
|
414
|
+
case 403:
|
|
415
|
+
throw new AuthenticationError(message, 'google', errorData);
|
|
416
|
+
case 429:
|
|
417
|
+
throw new RateLimitError(message, 'google', undefined, errorData);
|
|
418
|
+
default:
|
|
419
|
+
throw new LLMProviderError(
|
|
420
|
+
message,
|
|
421
|
+
`GOOGLE_${response.status}`,
|
|
422
|
+
'google',
|
|
423
|
+
response.status,
|
|
424
|
+
response.status >= 500,
|
|
425
|
+
errorData
|
|
426
|
+
);
|
|
427
|
+
}
|
|
428
|
+
}
|
|
429
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
/**
 * @sparkleideas/providers
 *
 * Multi-LLM Provider System for Claude Flow V3
 *
 * Supports:
 * - Anthropic (Claude 3.5, 3 Opus, Sonnet, Haiku)
 * - OpenAI (GPT-4o, o1, GPT-4, GPT-3.5)
 * - Google (Gemini 2.0, 1.5 Pro, Flash)
 * - Cohere (Command R+, R, Light)
 * - Ollama (Local: Llama, Mistral, CodeLlama, Phi)
 *
 * Features:
 * - Load balancing (round-robin, latency, cost-based)
 * - Automatic failover
 * - Request caching
 * - Cost optimization (85%+ savings with intelligent routing)
 * - Circuit breaker protection
 * - Health monitoring
 *
 * @module @sparkleideas/providers
 */

// Export shared types and the error hierarchy
export * from './types.js';

// Export the abstract base provider and its default logger
export { BaseProvider, consoleLogger } from './base-provider.js';
export type { BaseProviderOptions, ILogger } from './base-provider.js';

// Export concrete provider implementations
export { AnthropicProvider } from './anthropic-provider.js';
export { OpenAIProvider } from './openai-provider.js';
export { GoogleProvider } from './google-provider.js';
export { CohereProvider } from './cohere-provider.js';
export { OllamaProvider } from './ollama-provider.js';
export { RuVectorProvider } from './ruvector-provider.js';

// Export the provider manager (load balancing, failover, caching)
export { ProviderManager, createProviderManager } from './provider-manager.js';
|