@sparkleideas/providers 3.5.2-patch.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,596 @@
1
+ /**
2
+ * V3 Abstract Base Provider
3
+ *
4
+ * Provides common functionality for all LLM providers:
5
+ * - Circuit breaker protection
6
+ * - Health monitoring
7
+ * - Cost tracking
8
+ * - Request metrics
9
+ *
10
+ * @module @sparkleideas/providers/base-provider
11
+ */
12
+
13
+ import { EventEmitter } from 'events';
14
+ import {
15
+ ILLMProvider,
16
+ LLMProvider,
17
+ LLMProviderConfig,
18
+ LLMRequest,
19
+ LLMResponse,
20
+ LLMStreamEvent,
21
+ LLMModel,
22
+ ModelInfo,
23
+ ProviderCapabilities,
24
+ HealthCheckResult,
25
+ ProviderStatus,
26
+ CostEstimate,
27
+ UsageStats,
28
+ UsagePeriod,
29
+ LLMProviderError,
30
+ RateLimitError,
31
+ ProviderUnavailableError,
32
+ } from './types.js';
33
+
34
+ /**
35
+ * Simple circuit breaker implementation
36
+ */
37
+ class CircuitBreaker {
38
+ private failures = 0;
39
+ private lastFailure = 0;
40
+ private state: 'closed' | 'open' | 'half-open' = 'closed';
41
+
42
+ constructor(
43
+ private readonly name: string,
44
+ private readonly threshold: number = 5,
45
+ private readonly resetTimeout: number = 60000
46
+ ) {}
47
+
48
+ async execute<T>(fn: () => Promise<T>): Promise<T> {
49
+ if (this.state === 'open') {
50
+ if (Date.now() - this.lastFailure > this.resetTimeout) {
51
+ this.state = 'half-open';
52
+ } else {
53
+ throw new Error(`Circuit breaker ${this.name} is open`);
54
+ }
55
+ }
56
+
57
+ try {
58
+ const result = await fn();
59
+ this.onSuccess();
60
+ return result;
61
+ } catch (error) {
62
+ this.onFailure();
63
+ throw error;
64
+ }
65
+ }
66
+
67
+ private onSuccess(): void {
68
+ this.failures = 0;
69
+ this.state = 'closed';
70
+ }
71
+
72
+ private onFailure(): void {
73
+ this.failures++;
74
+ this.lastFailure = Date.now();
75
+
76
+ if (this.failures >= this.threshold) {
77
+ this.state = 'open';
78
+ }
79
+ }
80
+
81
+ getState(): string {
82
+ return this.state;
83
+ }
84
+ }
85
+
86
/**
 * Minimal structured-logging contract used by this package.
 *
 * Implementations receive a human-readable message plus optional
 * structured metadata; `error` takes an arbitrary error value instead
 * (it is passed through unchanged, so it may be an Error, string, etc.).
 */
export interface ILogger {
  /** Informational message with optional structured metadata. */
  info(message: string, meta?: Record<string, unknown>): void;
  /** Warning message with optional structured metadata. */
  warn(message: string, meta?: Record<string, unknown>): void;
  /** Error message; `error` is the underlying cause, if any. */
  error(message: string, error?: unknown): void;
  /** Debug-level message with optional structured metadata. */
  debug(message: string, meta?: Record<string, unknown>): void;
}
95
+
96
+ /**
97
+ * Console logger implementation
98
+ */
99
+ export const consoleLogger: ILogger = {
100
+ info: (msg, meta) => console.log(`[INFO] ${msg}`, meta || ''),
101
+ warn: (msg, meta) => console.warn(`[WARN] ${msg}`, meta || ''),
102
+ error: (msg, err) => console.error(`[ERROR] ${msg}`, err || ''),
103
+ debug: (msg, meta) => console.debug(`[DEBUG] ${msg}`, meta || ''),
104
+ };
105
+
106
/**
 * Constructor options for {@link BaseProvider}.
 */
export interface BaseProviderOptions {
  /** Logger to use; defaults to `consoleLogger` when omitted. */
  logger?: ILogger;
  /** Provider configuration (model, temperature, maxTokens, …). */
  config: LLMProviderConfig;
  /** Cache TTL in ms. NOTE(review): not read anywhere in this file — confirm it is consumed by subclasses. */
  cacheTTL?: number;
  /** Circuit breaker tuning; defaults are 5 failures / 60 000 ms reset. */
  circuitBreakerOptions?: {
    /** Consecutive failures before the breaker opens (default 5). */
    threshold?: number;
    /** Milliseconds before an open breaker allows a trial call (default 60000). */
    resetTimeout?: number;
  };
}
118
+
119
/**
 * Abstract base class for LLM providers.
 *
 * Wraps provider-specific `do*` hooks with circuit-breaker protection,
 * request/error metrics, cost accounting, health checks, and EventEmitter
 * notifications ('response', 'error', 'health_check').
 */
export abstract class BaseProvider extends EventEmitter implements ILLMProvider {
  abstract readonly name: LLMProvider;
  abstract readonly capabilities: ProviderCapabilities;

  protected logger: ILogger;
  protected circuitBreaker: CircuitBreaker;
  // Handle for the periodic health check started in startHealthChecks().
  protected healthCheckInterval?: ReturnType<typeof setInterval>;
  // Most recent health check result (success or failure), if any ran.
  protected lastHealthCheck?: HealthCheckResult;

  // Metrics
  protected requestCount = 0;
  protected errorCount = 0;
  protected totalTokens = 0;
  protected totalCost = 0;
  // Per-request metrics keyed by response id (or a synthetic stream key);
  // capped at 1000 entries, oldest evicted first (Map preserves insertion order).
  protected requestMetrics: Map<string, {
    timestamp: Date;
    model: string;
    tokens: number;
    cost?: number;
    latency: number;
  }> = new Map();

  public config: LLMProviderConfig;

  constructor(options: BaseProviderOptions) {
    super();
    this.logger = options.logger || consoleLogger;
    this.config = options.config;

    // Initialize circuit breaker
    this.circuitBreaker = new CircuitBreaker(
      `llm-${this.config.provider}`,
      options.circuitBreakerOptions?.threshold || 5,
      options.circuitBreakerOptions?.resetTimeout || 60000
    );
  }

  /**
   * Initialize the provider: validate config, run the subclass hook,
   * optionally start periodic health checks, and run one health check.
   */
  async initialize(): Promise<void> {
    this.logger.info(`Initializing ${this.name} provider`, {
      model: this.config.model,
      temperature: this.config.temperature,
      maxTokens: this.config.maxTokens,
    });

    // Validate configuration
    this.validateConfig();

    // Provider-specific initialization
    await this.doInitialize();

    // NOTE(review): periodic health checks are gated on `enableCaching`,
    // and startHealthChecks() reuses `cacheTimeout` as its interval —
    // looks like config reuse rather than a caching concern; confirm intent.
    if (this.config.enableCaching) {
      this.startHealthChecks();
    }

    // Initial health check (healthCheck() never throws; failures are
    // captured in the result and emitted as 'health_check').
    await this.healthCheck();
  }

  /**
   * Provider-specific initialization (override in subclass)
   */
  protected abstract doInitialize(): Promise<void>;

  /**
   * Validate provider configuration.
   *
   * Throws if `model` is missing or `temperature` is outside [0, 2];
   * an unsupported model only logs a warning (some providers accept
   * models not listed in `capabilities.supportedModels`).
   */
  protected validateConfig(): void {
    if (!this.config.model) {
      throw new Error(`Model is required for ${this.name} provider`);
    }

    if (!this.validateModel(this.config.model)) {
      this.logger.warn(`Model ${this.config.model} may not be supported by ${this.name}`);
    }

    if (this.config.temperature !== undefined) {
      if (this.config.temperature < 0 || this.config.temperature > 2) {
        throw new Error('Temperature must be between 0 and 2');
      }
    }
  }

  /**
   * Complete a request.
   *
   * Runs the subclass hook under the circuit breaker, records metrics,
   * and emits 'response' on success. On failure, increments the error
   * count, emits 'error', and rethrows the error transformed into an
   * LLMProviderError subtype.
   */
  async complete(request: LLMRequest): Promise<LLMResponse> {
    const startTime = Date.now();

    try {
      const response = await this.circuitBreaker.execute(async () => {
        return await this.doComplete(request);
      });

      const latency = Date.now() - startTime;
      this.trackRequest(request, response, latency);

      this.emit('response', {
        provider: this.name,
        model: response.model,
        latency,
        tokens: response.usage.totalTokens,
        cost: response.cost?.totalCost,
      });

      return response;
    } catch (error) {
      this.errorCount++;

      const providerError = this.transformError(error);

      this.emit('error', {
        provider: this.name,
        error: providerError,
        request,
      });

      throw providerError;
    }
  }

  /**
   * Provider-specific completion (override in subclass)
   */
  protected abstract doComplete(request: LLMRequest): Promise<LLMResponse>;

  /**
   * Stream complete a request.
   *
   * Forwards every event from the subclass stream, tracking the latest
   * usage/cost figures seen, then records stream metrics when the stream
   * ends. On error, yields a final { type: 'error' } event and rethrows
   * the transformed error.
   *
   * NOTE(review): the circuit breaker wraps only stream *acquisition*;
   * errors thrown while iterating the stream bypass the breaker (they are
   * still counted and transformed here). Confirm this is intended.
   */
  async *streamComplete(request: LLMRequest): AsyncIterable<LLMStreamEvent> {
    const startTime = Date.now();
    let totalTokens = 0;
    let totalCost = 0;

    try {
      if (!this.capabilities.supportsStreaming) {
        throw new LLMProviderError(
          'Streaming not supported',
          'STREAMING_NOT_SUPPORTED',
          this.name,
          undefined,
          false
        );
      }

      const stream = await this.circuitBreaker.execute(async () => {
        return this.doStreamComplete(request);
      });

      for await (const event of stream) {
        // Keep the most recent totals; events are assumed to carry
        // cumulative usage/cost — TODO confirm against subclass streams.
        if (event.usage) {
          totalTokens = event.usage.totalTokens;
        }
        if (event.cost) {
          totalCost = event.cost.totalCost;
        }
        yield event;
      }

      const latency = Date.now() - startTime;
      this.trackStreamRequest(request, totalTokens, totalCost, latency);
    } catch (error) {
      this.errorCount++;
      const providerError = this.transformError(error);

      // Surface the error in-band for consumers, then rethrow out-of-band.
      yield { type: 'error', error: providerError };
      throw providerError;
    }
  }

  /**
   * Provider-specific stream completion (override in subclass)
   */
  protected abstract doStreamComplete(request: LLMRequest): AsyncIterable<LLMStreamEvent>;

  /**
   * List available models
   */
  abstract listModels(): Promise<LLMModel[]>;

  /**
   * Get model information
   */
  abstract getModelInfo(model: LLMModel): Promise<ModelInfo>;

  /**
   * Validate if a model is supported (i.e. listed in capabilities).
   */
  validateModel(model: LLMModel): boolean {
    return this.capabilities.supportedModels.includes(model);
  }

  /**
   * Perform health check.
   *
   * Never rejects: failures are converted into an unhealthy result.
   * Either way the result (with measured latency and timestamp) is stored
   * in `lastHealthCheck` and emitted as 'health_check'.
   */
  async healthCheck(): Promise<HealthCheckResult> {
    const startTime = Date.now();

    try {
      const result = await this.doHealthCheck();

      // Spread first so our measured latency/timestamp win over any
      // values the subclass put in the result.
      this.lastHealthCheck = {
        ...result,
        latency: Date.now() - startTime,
        timestamp: new Date(),
      };

      this.emit('health_check', this.lastHealthCheck);
      return this.lastHealthCheck;
    } catch (error) {
      this.lastHealthCheck = {
        healthy: false,
        error: error instanceof Error ? error.message : 'Unknown error',
        latency: Date.now() - startTime,
        timestamp: new Date(),
      };

      this.emit('health_check', this.lastHealthCheck);
      return this.lastHealthCheck;
    }
  }

  /**
   * Provider-specific health check (override in subclass)
   */
  protected abstract doHealthCheck(): Promise<HealthCheckResult>;

  /**
   * Get provider status.
   *
   * NOTE(review): queueLength/activeRequests are both derived from the
   * *historical* metrics map size (capped at 1000), not from in-flight
   * requests, so currentLoad saturates at 1 after 100 recorded requests —
   * confirm this approximation is acceptable to callers.
   */
  getStatus(): ProviderStatus {
    const queueLength = this.requestMetrics.size;

    return {
      available: this.lastHealthCheck?.healthy ?? false,
      currentLoad: Math.min(queueLength / 100, 1),
      queueLength,
      activeRequests: queueLength,
      rateLimitRemaining: this.getRateLimitRemaining(),
      rateLimitReset: this.getRateLimitReset(),
    };
  }

  /**
   * Get remaining rate limit (override in provider)
   */
  protected getRateLimitRemaining(): number | undefined {
    return undefined;
  }

  /**
   * Get rate limit reset time (override in provider)
   */
  protected getRateLimitReset(): Date | undefined {
    return undefined;
  }

  /**
   * Estimate cost for a request.
   *
   * Returns an all-zero estimate with confidence 0 when no pricing is
   * configured for the model. Otherwise estimates prompt tokens from the
   * serialized messages (≈4 chars/token) and assumes the completion uses
   * the full token budget (request/config maxTokens, else 1000).
   */
  async estimateCost(request: LLMRequest): Promise<CostEstimate> {
    const model = request.model || this.config.model;
    const pricing = this.capabilities.pricing?.[model];

    if (!pricing) {
      return {
        estimatedPromptTokens: 0,
        estimatedCompletionTokens: 0,
        estimatedTotalTokens: 0,
        estimatedCost: { prompt: 0, completion: 0, total: 0, currency: 'USD' },
        confidence: 0,
      };
    }

    // JSON.stringify includes role/metadata overhead, inflating the
    // prompt estimate slightly — acceptable for a rough estimate.
    const promptTokens = this.estimateTokens(JSON.stringify(request.messages));
    const completionTokens = request.maxTokens || this.config.maxTokens || 1000;

    const promptCost = (promptTokens / 1000) * pricing.promptCostPer1k;
    const completionCost = (completionTokens / 1000) * pricing.completionCostPer1k;

    return {
      estimatedPromptTokens: promptTokens,
      estimatedCompletionTokens: completionTokens,
      estimatedTotalTokens: promptTokens + completionTokens,
      estimatedCost: {
        prompt: promptCost,
        completion: completionCost,
        total: promptCost + completionCost,
        currency: pricing.currency,
      },
      confidence: 0.7,
    };
  }

  /**
   * Simple token estimation (4 chars ≈ 1 token)
   */
  protected estimateTokens(text: string): number {
    return Math.ceil(text.length / 4);
  }

  /**
   * Get usage statistics.
   *
   * NOTE(review): counters are lifetime totals, not filtered to the
   * requested period — the period only shapes the reported date range.
   * The 70/30 prompt/completion split is a fixed approximation (actual
   * split is not tracked), and modelBreakdown is always empty here.
   */
  async getUsage(period: UsagePeriod = 'day'): Promise<UsageStats> {
    const now = new Date();
    const start = this.getStartDate(now, period);

    return {
      period: { start, end: now },
      requests: this.requestCount,
      tokens: {
        prompt: Math.floor(this.totalTokens * 0.7),
        completion: Math.floor(this.totalTokens * 0.3),
        total: this.totalTokens,
      },
      cost: {
        prompt: this.totalCost * 0.7,
        completion: this.totalCost * 0.3,
        total: this.totalCost,
        currency: 'USD',
      },
      errors: this.errorCount,
      averageLatency: this.calculateAverageLatency(),
      modelBreakdown: {},
    };
  }

  /** Compute the start of the reporting window ending at `end`. */
  private getStartDate(end: Date, period: UsagePeriod): Date {
    const start = new Date(end);
    switch (period) {
      case 'hour':
        start.setHours(start.getHours() - 1);
        break;
      case 'day':
        start.setDate(start.getDate() - 1);
        break;
      case 'week':
        start.setDate(start.getDate() - 7);
        break;
      case 'month':
        start.setMonth(start.getMonth() - 1);
        break;
      case 'all':
        // Arbitrary epoch meaning "everything we could have recorded".
        start.setFullYear(2020);
        break;
    }
    return start;
  }

  /** Mean latency over retained metrics (0 when none recorded). */
  private calculateAverageLatency(): number {
    if (this.requestMetrics.size === 0) return 0;

    let total = 0;
    let count = 0;

    this.requestMetrics.forEach((metrics) => {
      // Truthiness check also skips latency === 0 entries.
      if (metrics.latency) {
        total += metrics.latency;
        count++;
      }
    });

    return count > 0 ? total / count : 0;
  }

  /**
   * Track successful request: bump counters, record per-request metrics
   * keyed by response id, and evict the oldest entry past 1000.
   */
  protected trackRequest(request: LLMRequest, response: LLMResponse, latency: number): void {
    this.requestCount++;
    this.totalTokens += response.usage.totalTokens;

    if (response.cost) {
      this.totalCost += response.cost.totalCost;
    }

    this.requestMetrics.set(response.id, {
      timestamp: new Date(),
      model: response.model,
      tokens: response.usage.totalTokens,
      cost: response.cost?.totalCost,
      latency,
    });

    // Keep last 1000 metrics
    if (this.requestMetrics.size > 1000) {
      // Map iterates in insertion order, so the first key is the oldest.
      const oldestKey = this.requestMetrics.keys().next().value;
      if (oldestKey) this.requestMetrics.delete(oldestKey);
    }
  }

  /**
   * Track streaming request under a synthetic timestamp key.
   *
   * NOTE(review): unlike trackRequest, no eviction happens here, so heavy
   * streaming use can grow the metrics map past 1000 — confirm whether
   * the cap should apply here too.
   */
  protected trackStreamRequest(
    request: LLMRequest,
    totalTokens: number,
    totalCost: number,
    latency: number
  ): void {
    this.requestCount++;
    this.totalTokens += totalTokens;
    this.totalCost += totalCost;

    this.requestMetrics.set(`stream-${Date.now()}`, {
      timestamp: new Date(),
      model: request.model || this.config.model,
      tokens: totalTokens,
      cost: totalCost,
      latency,
    });
  }

  /**
   * Transform errors to provider errors.
   *
   * Classification is by case-sensitive substring match on the message;
   * already-typed LLMProviderErrors pass through unchanged, and anything
   * unrecognized becomes a retryable UNKNOWN error.
   */
  protected transformError(error: unknown): LLMProviderError {
    if (error instanceof LLMProviderError) {
      return error;
    }

    if (error instanceof Error) {
      if (error.message.includes('rate limit')) {
        return new RateLimitError(error.message, this.name);
      }

      if (error.message.includes('timeout') || error.message.includes('ETIMEDOUT')) {
        return new LLMProviderError('Request timed out', 'TIMEOUT', this.name, undefined, true);
      }

      if (error.message.includes('ECONNREFUSED') || error.message.includes('fetch failed')) {
        return new ProviderUnavailableError(this.name, { originalError: error.message });
      }
    }

    return new LLMProviderError(
      error instanceof Error ? error.message : String(error),
      'UNKNOWN',
      this.name,
      undefined,
      true
    );
  }

  /**
   * Start periodic health checks.
   *
   * NOTE(review): the interval reuses `config.cacheTimeout` (default
   * 5 min) — confirm a dedicated health-check interval isn't intended.
   */
  protected startHealthChecks(): void {
    const interval = this.config.cacheTimeout || 300000;

    this.healthCheckInterval = setInterval(() => {
      // healthCheck() should not reject, but guard against surprises so
      // the timer never produces an unhandled rejection.
      this.healthCheck().catch((error) => {
        this.logger.error(`Health check failed for ${this.name}`, error);
      });
    }, interval);
  }

  /**
   * Clean up resources: stop health checks, drop metrics, and detach
   * all event listeners. The instance should not be reused afterwards.
   */
  destroy(): void {
    if (this.healthCheckInterval) {
      clearInterval(this.healthCheckInterval);
    }

    this.requestMetrics.clear();
    this.removeAllListeners();

    this.logger.info(`${this.name} provider destroyed`);
  }
}