@flare-ai-sdk/server 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1196 @@
1
+ import EventEmitter from 'eventemitter3';
2
+
3
+ /**
4
+ * Circuit breaker configuration
5
+ */
6
+ interface CircuitBreakerConfig {
7
+ /** Number of failures before opening circuit */
8
+ failureThreshold?: number;
9
+ /** Time to wait before attempting reset (ms) */
10
+ resetTimeout?: number;
11
+ /** Half-open state request limit */
12
+ halfOpenRequests?: number;
13
+ }
14
+ /**
15
+ * Health check status
16
+ */
17
+ interface HealthStatus {
18
+ /** Overall health status */
19
+ status: 'healthy' | 'degraded' | 'unhealthy';
20
+ /** Timestamp */
21
+ timestamp: number;
22
+ /** Service uptime in ms */
23
+ uptime: number;
24
+ /** Circuit breaker state */
25
+ circuitBreaker?: 'closed' | 'open' | 'half-open';
26
+ /** Active connections */
27
+ activeConnections?: number;
28
+ /** Queue size */
29
+ queueSize?: number;
30
+ /** Metrics */
31
+ metrics?: MetricsData;
32
+ }
33
+ /**
34
+ * Performance metrics
35
+ */
36
+ interface MetricsData {
37
+ /** Total requests */
38
+ totalRequests: number;
39
+ /** Successful requests */
40
+ successfulRequests: number;
41
+ /** Failed requests */
42
+ failedRequests: number;
43
+ /** Average latency in ms */
44
+ avgLatency: number;
45
+ /** P95 latency in ms */
46
+ p95Latency: number;
47
+ /** P99 latency in ms */
48
+ p99Latency: number;
49
+ /** Requests per second */
50
+ rps: number;
51
+ }
52
+ /**
53
+ * Authentication context
54
+ */
55
+ interface AuthContext {
56
+ /** API key or token */
57
+ apiKey?: string;
58
+ /** User identifier */
59
+ userId?: string;
60
+ /** Custom headers */
61
+ headers?: Record<string, string>;
62
+ /** Validate function */
63
+ validate?: (req: any) => Promise<boolean>;
64
+ }
65
+ /**
66
+ * Content filter configuration
67
+ */
68
+ interface ContentFilter {
69
+ /** Input filter function */
70
+ filterInput?: (content: string) => Promise<string | null>;
71
+ /** Output filter function */
72
+ filterOutput?: (content: string) => Promise<string | null>;
73
+ /** Blocked patterns */
74
+ blockedPatterns?: RegExp[];
75
+ }
76
+
77
+ /**
78
+ * Rate limiting configuration
79
+ */
80
+ interface RateLimitConfig {
81
+ /** Maximum requests per window */
82
+ maxRequests?: number;
83
+ /** Time window in milliseconds */
84
+ windowMs?: number;
85
+ /** Client identifier for per-client limiting */
86
+ clientId?: string;
87
+ }
88
+
89
+ /**
90
+ * Retry configuration with exponential backoff
91
+ */
92
+ interface RetryConfig {
93
+ /** Maximum number of retry attempts */
94
+ maxRetries?: number;
95
+ /** Initial delay in milliseconds */
96
+ initialDelay?: number;
97
+ /** Maximum delay in milliseconds */
98
+ maxDelay?: number;
99
+ /** Backoff multiplier */
100
+ backoffMultiplier?: number;
101
+ /** Jitter to add randomness */
102
+ jitter?: boolean;
103
+ }
104
+ /**
105
+ * Request timeout configuration
106
+ */
107
+ interface TimeoutConfig {
108
+ /** Request timeout in milliseconds */
109
+ timeout?: number;
110
+ /** Connection timeout in milliseconds */
111
+ connectionTimeout?: number;
112
+ }
113
+
114
+ /**
115
+ * Core type definitions for paprflare-kit
116
+ * @module types
117
+ */
118
+
119
+ /**
120
+ * Supported AI providers
121
+ */
122
+ type AIProvider = 'openai' | 'anthropic' | 'google' | 'sarvam' | 'custom';
123
+ /**
124
+ * Configuration for AI model requests
125
+ */
126
+ interface ModelConfig {
127
+ /** Provider name */
128
+ provider: AIProvider;
129
+ /** Model identifier (e.g., 'gpt-4', 'claude-3-opus') */
130
+ model: string;
131
+ /** API key for authentication */
132
+ apiKey?: string;
133
+ /** Base URL for custom providers */
134
+ baseUrl?: string;
135
+ /** Maximum tokens to generate */
136
+ maxTokens?: number;
137
+ /** Temperature for randomness (0-2) */
138
+ temperature?: number;
139
+ /** Top P sampling parameter */
140
+ topP?: number;
141
+ /** Frequency penalty (-2 to 2) */
142
+ frequencyPenalty?: number;
143
+ /** Presence penalty (-2 to 2) */
144
+ presencePenalty?: number;
145
+ /** Stop sequences */
146
+ stop?: string[];
147
+ }
148
+ /**
149
+ * Complete SDK configuration
150
+ */
151
+ interface FlareConfig extends ModelConfig {
152
+ retry?: RetryConfig;
153
+ timeout?: TimeoutConfig;
154
+ rateLimit?: RateLimitConfig;
155
+ circuitBreaker?: CircuitBreakerConfig;
156
+ /** Enable request queuing */
157
+ enableQueueing?: boolean;
158
+ /** Queue priority (higher = processed first) */
159
+ queuePriority?: number;
160
+ /** Enable connection pooling */
161
+ enablePooling?: boolean;
162
+ /** Pool size for connections */
163
+ poolSize?: number;
164
+ /** Enable request deduplication */
165
+ enableDeduplication?: boolean;
166
+ /** Enable metrics collection */
167
+ enableMetrics?: boolean;
168
+ /** Enable debug logging */
169
+ debug?: boolean;
170
+ }
171
+ /**
172
+ * Message in a conversation
173
+ */
174
+ interface Message {
175
+ /** Message role */
176
+ role: 'system' | 'user' | 'assistant';
177
+ /** Message content */
178
+ content: string;
179
+ /** Optional message name */
180
+ name?: string;
181
+ /** Optional metadata */
182
+ metadata?: Record<string, any>;
183
+ }
184
+ /**
185
+ * Request for text generation
186
+ */
187
+ interface GenerateTextRequest {
188
+ /** Conversation messages */
189
+ messages: Message[];
190
+ /** Optional system prompt */
191
+ systemPrompt?: string;
192
+ /** Model configuration overrides */
193
+ config?: Partial<FlareConfig>;
194
+ }
195
+ /**
196
+ * Response from text generation
197
+ */
198
+ interface GenerateTextResponse {
199
+ /** Generated text content */
200
+ text: string;
201
+ /** Token usage information */
202
+ usage: {
203
+ promptTokens: number;
204
+ completionTokens: number;
205
+ totalTokens: number;
206
+ };
207
+ /** Finish reason */
208
+ finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls';
209
+ /** Response metadata */
210
+ metadata?: Record<string, any>;
211
+ }
212
+ /**
213
+ * Streaming chunk from text generation
214
+ */
215
+ interface TextStreamChunk {
216
+ /** Chunk type */
217
+ type: 'content' | 'metadata' | 'error' | 'done';
218
+ /** Text content (for content chunks) */
219
+ content?: string;
220
+ /** Metadata (for metadata chunks) */
221
+ metadata?: Record<string, any>;
222
+ /** Error information (for error chunks) */
223
+ error?: Error;
224
+ /** Completion data (for done chunks) */
225
+ done?: {
226
+ finishReason: string;
227
+ usage: GenerateTextResponse['usage'];
228
+ };
229
+ }
230
+ /**
231
+ * Schema for structured object generation
232
+ */
233
+ interface ObjectSchema<T> {
234
+ /** Schema type */
235
+ type: 'object' | 'array' | 'string' | 'number' | 'boolean';
236
+ /** Property definitions (for object type) */
237
+ properties?: Record<string, ObjectSchema<T>>;
238
+ /** Required properties */
239
+ required?: string[];
240
+ /** Items schema (for array type) */
241
+ items?: ObjectSchema<T>;
242
+ /** Description */
243
+ description?: string;
244
+ /** Enum values */
245
+ enum?: any[];
246
+ }
247
+ /**
248
+ * Request for object generation
249
+ */
250
+ interface GenerateObjectRequest<T> {
251
+ /** Conversation messages */
252
+ messages: Message[];
253
+ /** JSON schema describing T */
254
+ schema: ObjectSchema<T>;
255
+ /** Optional system prompt */
256
+ systemPrompt?: string;
257
+ /** Model configuration overrides */
258
+ config?: Partial<FlareConfig>;
259
+ }
260
+ /**
261
+ * Response from object generation
262
+ */
263
+ interface GenerateObjectResponse<T = any> {
264
+ /** Parsed object */
265
+ object: T;
266
+ /** Raw text response */
267
+ text: string;
268
+ /** Token usage */
269
+ usage: GenerateTextResponse['usage'];
270
+ /** Validation errors (if any) */
271
+ validationErrors?: string[];
272
+ }
273
+ /**
274
+ * Streaming chunk from object generation
275
+ */
276
+ interface ObjectStreamChunk<T = any> {
277
+ /** Chunk type */
278
+ type: 'partial' | 'complete' | 'error';
279
+ /** Partial object (incrementally parsed) */
280
+ partial?: Partial<T>;
281
+ /** Complete object (when done) */
282
+ object?: T;
283
+ /** Error information */
284
+ error?: Error;
285
+ /** Raw text accumulated so far */
286
+ text?: string;
287
+ }
288
+ /**
289
+ * Stream reader interface
290
+ */
291
+ interface StreamReader<T> {
292
+ /** Read next chunk */
293
+ read(): Promise<{
294
+ value: T;
295
+ done: boolean;
296
+ }>;
297
+ /** Cancel the stream */
298
+ cancel(): void;
299
+ /** Get async iterator */
300
+ [Symbol.asyncIterator](): AsyncIterableIterator<T>;
301
+ }
302
+ /**
303
+ * Event types for streaming
304
+ */
305
+ interface StreamEvents {
306
+ 'chunk': (chunk: any) => void;
307
+ 'error': (error: Error) => void;
308
+ 'done': (result: any) => void;
309
+ 'start': () => void;
310
+ 'abort': () => void;
311
+ }
312
+
313
+ /**
314
+ * Custom error classes for paprflare-kit
315
+ * @module errors
316
+ */
317
+ /**
318
+ * Base error class for all paprflare-kit errors
319
+ */
320
+ declare class FlareError extends Error {
321
+ readonly code: string;
322
+ readonly statusCode?: number;
323
+ readonly retryable: boolean;
324
+ readonly details?: Record<string, any>;
325
+ constructor(message: string, code: string, statusCode?: number, retryable?: boolean, details?: Record<string, any>);
326
+ }
327
+ /**
328
+ * Network-related errors
329
+ */
330
+ declare class NetworkError extends FlareError {
331
+ constructor(message: string, details?: Record<string, any>);
332
+ }
333
+ /**
334
+ * Timeout errors
335
+ */
336
+ declare class TimeoutError extends FlareError {
337
+ constructor(message: string, details?: Record<string, any>);
338
+ }
339
+ /**
340
+ * Rate limit exceeded errors
341
+ */
342
+ declare class RateLimitError extends FlareError {
343
+ readonly retryAfter?: number;
344
+ constructor(message: string, retryAfter?: number, details?: Record<string, any>);
345
+ }
346
+ /**
347
+ * Authentication/authorization errors
348
+ */
349
+ declare class AuthenticationError extends FlareError {
350
+ constructor(message: string, details?: Record<string, any>);
351
+ }
352
+ /**
353
+ * Invalid request errors
354
+ */
355
+ declare class ValidationError extends FlareError {
356
+ constructor(message: string, details?: Record<string, any>);
357
+ }
358
+ /**
359
+ * Circuit breaker open errors
360
+ */
361
+ declare class CircuitBreakerError extends FlareError {
362
+ constructor(message: string, details?: Record<string, any>);
363
+ }
364
+ /**
365
+ * Provider-specific errors
366
+ */
367
+ declare class ProviderError extends FlareError {
368
+ readonly provider: string;
369
+ constructor(provider: string, message: string, statusCode?: number, details?: Record<string, any>);
370
+ }
371
+ /**
372
+ * Streaming errors
373
+ */
374
+ declare class StreamError extends FlareError {
375
+ constructor(message: string, details?: Record<string, any>);
376
+ }
377
+ /**
378
+ * Parse/validation errors for structured output
379
+ */
380
+ declare class ParseError extends FlareError {
381
+ constructor(message: string, details?: Record<string, any>);
382
+ }
383
+ /**
384
+ * Content filter errors
385
+ */
386
+ declare class ContentFilterError extends FlareError {
387
+ constructor(message: string, details?: Record<string, any>);
388
+ }
389
+ /**
390
+ * Check if an error is retryable
391
+ */
392
+ declare function isRetryableError(error: Error): boolean;
393
+ /**
394
+ * Extract status code from error
395
+ */
396
+ declare function getErrorStatusCode(error: Error): number | undefined;
397
+
398
+ /**
399
+ * Metrics collection and reporting
400
+ * @module metrics
401
+ */
402
+
403
+ /**
404
+ * Metrics collector for monitoring SDK performance
405
+ *
406
+ * @example
407
+ * ```typescript
408
+ * const metrics = new MetricsCollector();
409
+ *
410
+ * const start = Date.now();
411
+ * try {
412
+ * await doRequest();
413
+ * metrics.recordSuccess(Date.now() - start);
414
+ * } catch (error) {
415
+ * metrics.recordFailure(Date.now() - start);
416
+ * }
417
+ *
418
+ * console.log(metrics.getMetrics());
419
+ * ```
420
+ */
421
+ declare class MetricsCollector {
422
+ private totalRequests;
423
+ private successfulRequests;
424
+ private failedRequests;
425
+ private latencyHistogram;
426
+ private startTime;
427
+ private windowStart;
428
+ private windowRequests;
429
+ private windowDuration;
430
+ /**
431
+ * Record successful request
432
+ * @param latency - Request latency in milliseconds
433
+ */
434
+ recordSuccess(latency: number): void;
435
+ /**
436
+ * Record failed request
437
+ * @param latency - Request latency in milliseconds
438
+ */
439
+ recordFailure(latency: number): void;
440
+ /**
441
+ * Update rolling window for RPS calculation
442
+ */
443
+ private updateWindow;
444
+ /**
445
+ * Get current metrics
446
+ */
447
+ getMetrics(): MetricsData;
448
+ /**
449
+ * Get success rate as percentage
450
+ */
451
+ getSuccessRate(): number;
452
+ /**
453
+ * Get uptime in milliseconds
454
+ */
455
+ getUptime(): number;
456
+ /**
457
+ * Reset all metrics
458
+ */
459
+ reset(): void;
460
+ /**
461
+ * Get detailed statistics
462
+ */
463
+ getDetailedStats(): {
464
+ successRate: number;
465
+ uptime: number;
466
+ timestamp: number;
467
+ totalRequests: number;
468
+ successfulRequests: number;
469
+ failedRequests: number;
470
+ avgLatency: number;
471
+ p95Latency: number;
472
+ p99Latency: number;
473
+ rps: number;
474
+ };
475
+ }
476
+ /**
477
+ * Get or create global metrics collector
478
+ */
479
+ declare function getGlobalMetrics(): MetricsCollector;
480
+ /**
481
+ * Create a new metrics collector
482
+ */
483
+ declare function createMetricsCollector(): MetricsCollector;
484
+ /**
485
+ * Measure function execution time and record metrics
486
+ * @param fn - Function to measure
487
+ * @param collector - Metrics collector (defaults to global)
488
+ * @returns Function result
489
+ */
490
+ declare function measureLatency<T>(fn: () => Promise<T>, collector?: MetricsCollector): Promise<T>;
491
+
492
+ /**
493
+ * Rate limiter implementation
494
+ * @module rateLimiter
495
+ */
496
+
497
+ /**
498
+ * Token bucket rate limiter
499
+ * Implements sliding window algorithm for smooth rate limiting
500
+ *
501
+ * @example
502
+ * ```typescript
503
+ * const limiter = new RateLimiter({ maxRequests: 10, windowMs: 1000 });
504
+ *
505
+ * try {
506
+ * await limiter.acquire('user-123');
507
+ * // Request allowed
508
+ * } catch (error) {
509
+ * // Rate limit exceeded
510
+ * }
511
+ * ```
512
+ */
513
+ declare class RateLimiter {
514
+ private buckets;
515
+ private config;
516
+ private cleanupInterval?;
517
+ constructor(config?: Partial<RateLimitConfig>);
518
+ /**
519
+ * Acquire a token for rate limiting
520
+ * @param clientId - Client identifier
521
+ * @throws RateLimitError if rate limit exceeded
522
+ */
523
+ acquire(clientId?: string): Promise<void>;
524
+ /**
525
+ * Check if request would be allowed without consuming token
526
+ * @param clientId - Client identifier
527
+ * @returns true if request would be allowed
528
+ */
529
+ check(clientId?: string): boolean;
530
+ /**
531
+ * Get remaining tokens for client
532
+ * @param clientId - Client identifier
533
+ * @returns Number of remaining requests
534
+ */
535
+ getRemaining(clientId?: string): number;
536
+ /**
537
+ * Reset rate limit for client
538
+ * @param clientId - Client identifier
539
+ */
540
+ reset(clientId?: string): void;
541
+ /**
542
+ * Get or create bucket for client
543
+ */
544
+ private getBucket;
545
+ /**
546
+ * Refill tokens based on elapsed time
547
+ */
548
+ private refillBucket;
549
+ /**
550
+ * Calculate retry-after time in milliseconds
551
+ */
552
+ private calculateRetryAfter;
553
+ /**
554
+ * Start periodic cleanup of old buckets
555
+ */
556
+ private startCleanup;
557
+ /**
558
+ * Get statistics for monitoring
559
+ */
560
+ getStats(): {
561
+ totalClients: number;
562
+ config: Required<RateLimitConfig>;
563
+ };
564
+ /**
565
+ * Clean up resources
566
+ */
567
+ destroy(): void;
568
+ }
569
+ /**
570
+ * Create a rate limiter instance
571
+ * @param config - Rate limit configuration
572
+ * @returns RateLimiter instance
573
+ */
574
+ declare function createRateLimiter(config?: Partial<RateLimitConfig>): RateLimiter;
575
+
576
+ /**
577
+ * Request queue with priority handling
578
+ * @module requestQueue
579
+ */
580
+
581
+ /**
582
+ * Queue configuration
583
+ */
584
+ interface QueueConfig {
585
+ /** Maximum concurrent requests */
586
+ concurrency?: number;
587
+ /** Maximum queue size */
588
+ maxQueueSize?: number;
589
+ /** Request timeout in ms */
590
+ timeout?: number;
591
+ }
592
+ /**
593
+ * Priority queue for managing concurrent requests
594
+ * Higher priority items are processed first
595
+ *
596
+ * @example
597
+ * ```typescript
598
+ * const queue = new RequestQueue({ concurrency: 5 });
599
+ *
600
+ * queue.on('drain', () => console.log('Queue empty'));
601
+ *
602
+ * const result = await queue.add(
603
+ * async () => fetchData(),
604
+ * { priority: 10 }
605
+ * );
606
+ * ```
607
+ */
608
+ declare class RequestQueue extends EventEmitter {
609
+ private queue;
610
+ private running;
611
+ private config;
612
+ private requestCounter;
613
+ constructor(config?: Partial<QueueConfig>);
614
+ /**
615
+ * Add request to queue
616
+ * @param fn - Async function to execute
617
+ * @param priority - Request priority (higher = processed first)
618
+ * @returns Promise resolving to function result
619
+ */
620
+ add<T>(fn: () => Promise<T>, priority?: number): Promise<T>;
621
+ /**
622
+ * Process queue items
623
+ */
624
+ private process;
625
+ /**
626
+ * Execute queue item with timeout
627
+ */
628
+ private executeItem;
629
+ /**
630
+ * Get queue size
631
+ */
632
+ size(): number;
633
+ /**
634
+ * Get number of running requests
635
+ */
636
+ getRunning(): number;
637
+ /**
638
+ * Check if queue is empty
639
+ */
640
+ isEmpty(): boolean;
641
+ /**
642
+ * Clear all pending requests
643
+ */
644
+ clear(): void;
645
+ /**
646
+ * Wait for queue to drain
647
+ */
648
+ drain(): Promise<void>;
649
+ /**
650
+ * Get queue statistics
651
+ */
652
+ getStats(): {
653
+ queueSize: number;
654
+ running: number;
655
+ concurrency: number;
656
+ maxQueueSize: number;
657
+ };
658
+ /**
659
+ * Update configuration
660
+ */
661
+ updateConfig(config: Partial<QueueConfig>): void;
662
+ }
663
+ /**
664
+ * Create a request queue instance
665
+ * @param config - Queue configuration
666
+ * @returns RequestQueue instance
667
+ */
668
+ declare function createRequestQueue(config?: Partial<QueueConfig>): RequestQueue;
669
+
670
+ /**
671
+ * Retry logic with exponential backoff
672
+ * @module retry
673
+ */
674
+
675
+ /**
676
+ * Retry a function with exponential backoff
677
+ * @param fn - Async function to retry
678
+ * @param config - Retry configuration
679
+ * @param onRetry - Optional callback on retry attempt
680
+ * @returns Promise resolving to function result
681
+ * @throws Error after all retries exhausted
682
+ *
683
+ * @example
684
+ * ```typescript
685
+ * const result = await withRetry(
686
+ * async () => fetchData(),
687
+ * { maxRetries: 3, initialDelay: 1000 },
688
+ * (attempt, error) => console.log(`Retry ${attempt}: ${error.message}`)
689
+ * );
690
+ * ```
691
+ */
692
+ declare function withRetry<T>(fn: () => Promise<T>, config?: Partial<RetryConfig>, onRetry?: (attempt: number, error: Error, delay: number) => void): Promise<T>;
693
+ /**
694
+ * Retry class with configurable behavior
695
+ */
696
+ declare class RetryHandler {
697
+ private config;
698
+ constructor(config?: Partial<RetryConfig>);
699
+ /**
700
+ * Execute function with retry logic
701
+ */
702
+ execute<T>(fn: () => Promise<T>, onRetry?: (attempt: number, error: Error, delay: number) => void): Promise<T>;
703
+ /**
704
+ * Update retry configuration
705
+ */
706
+ updateConfig(config: Partial<RetryConfig>): void;
707
+ /**
708
+ * Get current configuration
709
+ */
710
+ getConfig(): Required<RetryConfig>;
711
+ }
712
+ /**
713
+ * Create a retry handler with custom configuration
714
+ * @param config - Retry configuration
715
+ * @returns RetryHandler instance
716
+ *
717
+ * @example
718
+ * ```typescript
719
+ * const retry = createRetryHandler({ maxRetries: 5, initialDelay: 500 });
720
+ * const result = await retry.execute(() => apiCall());
721
+ * ```
722
+ */
723
+ declare function createRetryHandler(config?: Partial<RetryConfig>): RetryHandler;
724
+
725
+ /**
726
+ * Circuit breaker pattern implementation
727
+ * @module circuitBreaker
728
+ */
729
+
730
+ /**
731
+ * Circuit breaker states
732
+ */
733
+ declare enum CircuitState {
734
+ CLOSED = "closed",
735
+ OPEN = "open",
736
+ HALF_OPEN = "half-open"
737
+ }
738
+ /**
739
+ * Circuit breaker implementation for fault tolerance
740
+ * Prevents cascading failures by stopping requests to failing services
741
+ *
742
+ * States:
743
+ * - CLOSED: Normal operation, requests pass through
744
+ * - OPEN: Service failing, requests blocked
745
+ * - HALF_OPEN: Testing if service recovered, limited requests allowed
746
+ *
747
+ * @example
748
+ * ```typescript
749
+ * const breaker = new CircuitBreaker({ failureThreshold: 3 });
750
+ *
751
+ * breaker.on('open', () => console.log('Circuit opened'));
752
+ * breaker.on('close', () => console.log('Circuit closed'));
753
+ *
754
+ * const result = await breaker.execute(async () => {
755
+ * return await fetchData();
756
+ * });
757
+ * ```
758
+ */
759
+ declare class CircuitBreaker extends EventEmitter {
760
+ private state;
761
+ private failureCount;
762
+ private successCount;
763
+ private lastFailureTime?;
764
+ private resetTimer?;
765
+ private config;
766
+ constructor(config?: Partial<CircuitBreakerConfig>);
767
+ /**
768
+ * Execute a function with circuit breaker protection
769
+ * @param fn - Async function to execute
770
+ * @returns Promise resolving to function result
771
+ * @throws CircuitBreakerError if circuit is open
772
+ */
773
+ execute<T>(fn: () => Promise<T>): Promise<T>;
774
+ /**
775
+ * Handle successful execution
776
+ */
777
+ private onSuccess;
778
+ /**
779
+ * Handle failed execution
780
+ */
781
+ private onFailure;
782
+ /**
783
+ * Open the circuit (block requests)
784
+ */
785
+ private open;
786
+ /**
787
+ * Enter half-open state (test if service recovered)
788
+ */
789
+ private halfOpen;
790
+ /**
791
+ * Close the circuit (allow requests)
792
+ */
793
+ private close;
794
+ /**
795
+ * Get current circuit state
796
+ */
797
+ getState(): CircuitState;
798
+ /**
799
+ * Get circuit statistics
800
+ */
801
+ getStats(): {
802
+ state: CircuitState;
803
+ failureCount: number;
804
+ successCount: number;
805
+ lastFailureTime: number | undefined;
806
+ };
807
+ /**
808
+ * Manually reset the circuit breaker
809
+ */
810
+ reset(): void;
811
+ /**
812
+ * Clean up resources
813
+ */
814
+ destroy(): void;
815
+ }
816
+ /**
817
+ * Create a circuit breaker instance
818
+ * @param config - Circuit breaker configuration
819
+ * @returns CircuitBreaker instance
820
+ */
821
+ declare function createCircuitBreaker(config?: Partial<CircuitBreakerConfig>): CircuitBreaker;
822
+
823
+ /**
824
+ * Base provider interface and implementations
825
+ * @module providers
826
+ */
827
+
828
+ /**
829
+ * Base AI provider interface
830
+ */
831
+ interface AIProviderInterface {
832
+ generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
833
+ streamText(request: GenerateTextRequest): AsyncIterable<any>;
834
+ generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
835
+ streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
836
+ }
837
+ /**
838
+ * Base provider class with common functionality
839
+ */
840
+ declare abstract class BaseProvider implements AIProviderInterface {
841
+ protected config: ModelConfig;
842
+ constructor(config: ModelConfig);
843
+ protected makeRequest(url: string, options: RequestInit, stream?: boolean): Promise<Response>;
844
+ protected formatMessages(request: GenerateTextRequest): Message[];
845
+ protected getEndpoint(path: string): string;
846
+ protected abstract getDefaultBaseUrl(): string;
847
+ abstract generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
848
+ abstract streamText(request: GenerateTextRequest): AsyncIterable<any>;
849
+ abstract generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
850
+ abstract streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
851
+ }
852
+
853
+ /**
854
+ * Create provider instance based on configuration
855
+ */
856
+ declare function createProvider(config: ModelConfig): AIProviderInterface;
857
+
858
+ /**
859
+ * Anthropic provider implementation
860
+ */
861
+ declare class AnthropicProvider extends BaseProvider {
862
+ protected getDefaultBaseUrl(): string;
863
+ generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
864
+ streamText(request: GenerateTextRequest): AsyncIterable<any>;
865
+ generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
866
+ streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
867
+ }
868
+
869
+ /**
870
+ * OpenAI provider implementation
871
+ */
872
+ declare class OpenAIProvider extends BaseProvider {
873
+ protected getDefaultBaseUrl(): string;
874
+ generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
875
+ streamText(request: GenerateTextRequest): AsyncIterable<any>;
876
+ generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
877
+ streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
878
+ }
879
+
880
+ /**
881
+ * Google AI (Gemini) provider implementation
882
+ * Supports Google's Gemini models via their API
883
+ *
884
+ * @example
885
+ * ```typescript
886
+ * const provider = new GoogleProvider({
887
+ * provider: 'google',
888
+ * model: 'gemini-pro',
889
+ * apiKey: process.env.GOOGLE_API_KEY,
890
+ * });
891
+ * ```
892
+ */
893
+ declare class GoogleProvider extends BaseProvider {
894
+ protected getDefaultBaseUrl(): string;
895
+ /**
896
+ * Format messages for Google's API format
897
+ */
898
+ private formatGoogleMessages;
899
+ generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
900
+ streamText(request: GenerateTextRequest): AsyncIterable<any>;
901
+ generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
902
+ streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
903
+ /**
904
+ * Map Google's finish reason to standard format
905
+ */
906
+ private mapGoogleFinishReason;
907
+ }
908
+
909
+ /**
910
+ * Sarvam AI provider implementation
911
+ * Supports Sarvam's Indian language models
912
+ *
913
+ * @example
914
+ * ```typescript
915
+ * const provider = new SarvamProvider({
916
+ * provider: 'sarvam',
917
+ * model: 'sarvam-2b',
918
+ * apiKey: process.env.SARVAM_API_KEY,
919
+ * });
920
+ * ```
921
+ */
922
+ declare class SarvamProvider extends BaseProvider {
923
+ protected getDefaultBaseUrl(): string;
924
+ generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
925
+ streamText(request: GenerateTextRequest): AsyncIterable<any>;
926
+ generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
927
+ streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
928
+ /**
929
+ * Map Sarvam's finish reason to standard format
930
+ */
931
+ private mapSarvamFinishReason;
932
+ }
933
+
934
+ /**
935
+ * Custom provider implementation
936
+ * Allows integration with any OpenAI-compatible API
937
+ *
938
+ * @example
939
+ * ```typescript
940
+ * const provider = new CustomProvider({
941
+ * provider: 'custom',
942
+ * model: 'llama-3-70b',
943
+ * baseUrl: 'https://your-api.com/v1',
944
+ * apiKey: process.env.CUSTOM_API_KEY,
945
+ * });
946
+ * ```
947
+ */
948
+ declare class CustomProvider extends BaseProvider {
949
+ protected getDefaultBaseUrl(): string;
950
+ /**
951
+ * Request format for custom provider
952
+ */
953
+ private requestFormat;
954
+ /**
955
+ * Set request format for custom provider
956
+ */
957
+ setRequestFormat(format: 'openai' | 'anthropic' | 'custom'): void;
958
+ generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
959
+ streamText(request: GenerateTextRequest): AsyncIterable<any>;
960
+ generateObject<T>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
961
+ streamObject<T>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
962
+ /**
963
+ * Format request body based on provider format
964
+ */
965
+ private formatRequestBody;
966
+ /**
967
+ * Parse text response based on provider format
968
+ */
969
+ private parseTextResponse;
970
+ /**
971
+ * Parse streaming response
972
+ */
973
+ private parseStreamResponse;
974
+ }
975
+
976
+ /**
977
+ * Streaming utilities for real-time data processing
978
+ * @module stream
979
+ */
980
+
981
+ /**
982
+ * Transform stream for processing chunks
983
+ */
984
+ declare class TransformStream<TInput, TOutput> {
985
+ private transformer;
986
+ constructor(transformer: (chunk: TInput) => TOutput | Promise<TOutput>);
987
+ transform(source: AsyncIterable<TInput>): AsyncIterable<TOutput>;
988
+ }
989
+ /**
990
+ * Stream reader implementation
991
+ */
992
+ declare class FlareStreamReader<T> implements StreamReader<T> {
993
+ private source;
994
+ private done;
995
+ constructor(source: AsyncIterable<T>);
996
+ read(): Promise<{
997
+ value: T;
998
+ done: boolean;
999
+ }>;
1000
+ cancel(): void;
1001
+ [Symbol.asyncIterator](): AsyncIterableIterator<T>;
1002
+ }
1003
+ /**
1004
+ * Create a data stream writer for Server-Sent Events
1005
+ * @returns Writer object with methods to push data
1006
+ *
1007
+ * @example
1008
+ * ```typescript
1009
+ * const writer = createDataStreamWriter();
1010
+ *
1011
+ * writer.writeData({ type: 'message', content: 'Hello' });
1012
+ * writer.writeError(new Error('Something went wrong'));
1013
+ * writer.close();
1014
+ *
1015
+ * return new Response(writer.readable);
1016
+ * ```
1017
+ */
1018
+ declare function createDataStreamWriter(): {
1019
+ readable: ReadableStream<Uint8Array<ArrayBufferLike>>;
1020
+ /**
1021
+ * Write data event
1022
+ */
1023
+ writeData(data: any): void;
1024
+ /**
1025
+ * Write error event
1026
+ */
1027
+ writeError(error: Error): void;
1028
+ /**
1029
+ * Write custom event
1030
+ */
1031
+ writeEvent(event: string, data: any): void;
1032
+ /**
1033
+ * Close the stream
1034
+ */
1035
+ close(): void;
1036
+ };
1037
+ /**
1038
+ * Convert async iterable to UI stream response
1039
+ * Transforms server stream into format suitable for client consumption
1040
+ *
1041
+ * @param stream - Async iterable of chunks
1042
+ * @returns Response object with streaming body
1043
+ *
1044
+ * @example
1045
+ * ```typescript
1046
+ * const stream = streamText({ messages, config });
1047
+ * return toUiStreamResponse(stream);
1048
+ * ```
1049
+ */
1050
+ declare function toUiStreamResponse<T>(stream: AsyncIterable<T>, headers?: Record<string, string>): Response;
1051
/**
 * Parse a Server-Sent Events stream.
 *
 * @param response - Fetch `Response` carrying an SSE body
 * @returns Async iterable of parsed events
 *   (NOTE(review): element type is `any` — event payload shape depends on
 *   the producer; narrow at the call site)
 *
 * @example
 * ```typescript
 * const response = await fetch('/api/stream');
 * for await (const event of parseSSEStream(response)) {
 *   console.log(event);
 * }
 * ```
 */
declare function parseSSEStream(response: Response): AsyncIterable<any>;
/**
 * Merge multiple streams into one.
 *
 * NOTE(review): interleaving order across sources is not specified by this
 * declaration — confirm against the implementation before relying on it.
 *
 * @param streams - Async iterables to merge
 * @returns Single async iterable yielding chunks from all sources
 */
declare function mergeStreams<T>(...streams: AsyncIterable<T>[]): AsyncIterable<T>;
/**
 * Buffer stream chunks for batch processing.
 *
 * @param stream - Source stream
 * @param size - Number of chunks collected per batch
 *   (presumably the final batch may be smaller — confirm with implementation)
 * @returns Stream of chunk arrays
 */
declare function bufferStream<T>(stream: AsyncIterable<T>, size: number): AsyncIterable<T[]>;
/**
 * Throttle a stream to limit its emission rate.
 *
 * @param stream - Source stream
 * @param delayMs - Delay between chunks in milliseconds
 * @returns Throttled stream yielding the same chunks, rate-limited
 */
declare function throttleStream<T>(stream: AsyncIterable<T>, delayMs: number): AsyncIterable<T>;
/**
 * Main FlareClient for AI interactions
 * @module client
 */

/**
 * Main FlareClient for AI interactions.
 * Provides text generation, streaming, and structured output with
 * built-in retry, circuit breaker, and rate limiting.
 *
 * Extends `EventEmitter` (eventemitter3); the event names emitted are not
 * visible from this declaration — see `StreamEvents` and the implementation.
 *
 * @example
 * ```typescript
 * const client = new FlareClient({
 *   provider: 'openai',
 *   model: 'gpt-4',
 *   apiKey: process.env.OPENAI_API_KEY,
 * });
 *
 * // Generate text
 * const response = await client.generateText({
 *   messages: [{ role: 'user', content: 'Hello!' }],
 * });
 *
 * // Stream text
 * const stream = client.streamText({
 *   messages: [{ role: 'user', content: 'Tell me a story' }],
 * });
 *
 * for await (const chunk of stream) {
 *   console.log(chunk.content);
 * }
 * ```
 */
declare class FlareClient extends EventEmitter {
    /** Active client configuration (`FlareConfig`). */
    private config;
    /** Underlying AI provider adapter. */
    private provider;
    /** Retry policy applied to requests. */
    private retryHandler;
    /** Circuit breaker guarding provider calls. */
    private circuitBreaker;
    /** Client-side rate limiter. */
    private rateLimiter;
    /** Optional request queue (only present when queuing is configured). */
    private requestQueue?;
    /** Metrics collector backing `getMetrics()` / `getHealth()`. */
    private metrics;
    /** Construction timestamp used for uptime reporting. */
    private startTime;
    constructor(config: FlareConfig);
    /**
     * Setup event listeners for monitoring
     */
    private setupEventListeners;
    /**
     * Generate text synchronously.
     * @param request - Text generation request
     * @returns Generated text response
     */
    generateText(request: GenerateTextRequest): Promise<GenerateTextResponse>;
    /**
     * Stream text generation.
     * @param request - Text generation request
     * @returns Async iterable of text chunks
     *   (NOTE(review): element type is `any`; likely `TextStreamChunk` —
     *   confirm with implementation)
     */
    streamText(request: GenerateTextRequest): AsyncIterable<any>;
    /**
     * Generate a structured object.
     * @param request - Object generation request
     * @returns Generated object response
     */
    generateObject<T = any>(request: GenerateObjectRequest<T>): Promise<GenerateObjectResponse<T>>;
    /**
     * Stream object generation.
     * @param request - Object generation request
     * @returns Async iterable of object chunks
     *   (NOTE(review): element type is `any`; likely `ObjectStreamChunk` —
     *   confirm with implementation)
     */
    streamObject<T = any>(request: GenerateObjectRequest<T>): AsyncIterable<any>;
    /**
     * Get the current health status (circuit state, queue size, metrics).
     */
    getHealth(): HealthStatus;
    /**
     * Get performance metrics plus derived success rate and uptime.
     */
    getMetrics(): {
        successRate: number;
        uptime: number;
        timestamp: number;
        totalRequests: number;
        successfulRequests: number;
        failedRequests: number;
        avgLatency: number;
        p95Latency: number;
        p99Latency: number;
        rps: number;
    };
    /**
     * Update configuration. Only the provided fields are changed.
     */
    updateConfig(config: Partial<FlareConfig>): void;
    /**
     * Reset the circuit breaker to its closed state.
     */
    resetCircuitBreaker(): void;
    /**
     * Clean up resources (timers, listeners, queue). Call when done with
     * the client.
     */
    destroy(): void;
}
/**
 * Create a FlareClient instance (factory alternative to `new FlareClient`).
 *
 * @param config - Client configuration
 * @returns FlareClient instance
 */
declare function createFlareClient(config: FlareConfig): FlareClient;

// Public API surface: client + factory, provider adapters, resilience
// utilities (retry, circuit breaker, rate limiter, request queue),
// streaming helpers, metrics, and the error hierarchy. Type-only symbols
// are exported with the `type` modifier.
export { type AIProvider, type AIProviderInterface, AnthropicProvider, type AuthContext, AuthenticationError, BaseProvider, CircuitBreaker, type CircuitBreakerConfig, CircuitBreakerError, CircuitState, type ContentFilter, ContentFilterError, CustomProvider, FlareClient, type FlareConfig, FlareError, FlareStreamReader, type GenerateObjectRequest, type GenerateObjectResponse, type GenerateTextRequest, type GenerateTextResponse, GoogleProvider, type HealthStatus, type Message, MetricsCollector, type MetricsData, type ModelConfig, NetworkError, type ObjectSchema, type ObjectStreamChunk, OpenAIProvider, ParseError, ProviderError, type QueueConfig, type RateLimitConfig, RateLimitError, RateLimiter, RequestQueue, type RetryConfig, RetryHandler, SarvamProvider, StreamError, type StreamEvents, type StreamReader, type TextStreamChunk, type TimeoutConfig, TimeoutError, TransformStream, ValidationError, bufferStream, createCircuitBreaker, createDataStreamWriter, createFlareClient, createMetricsCollector, createProvider, createRateLimiter, createRequestQueue, createRetryHandler, getErrorStatusCode, getGlobalMetrics, isRetryableError, measureLatency, mergeStreams, parseSSEStream, throttleStream, toUiStreamResponse, withRetry };