@gravito/stream 2.0.2 → 2.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. package/README.md +27 -1
  2. package/dist/BatchConsumer.d.ts +81 -0
  3. package/dist/Consumer.d.ts +215 -0
  4. package/dist/DashboardProvider.d.ts +29 -0
  5. package/dist/Job.d.ts +183 -0
  6. package/dist/OrbitStream.d.ts +151 -0
  7. package/dist/QueueManager.d.ts +321 -0
  8. package/dist/Queueable.d.ts +91 -0
  9. package/dist/Scheduler.d.ts +215 -0
  10. package/dist/StreamEventBackend.d.ts +120 -0
  11. package/dist/SystemEventJob.d.ts +41 -0
  12. package/dist/Worker.d.ts +139 -0
  13. package/dist/benchmarks/PerformanceReporter.d.ts +99 -0
  14. package/dist/consumer/ConcurrencyGate.d.ts +55 -0
  15. package/dist/consumer/ConsumerStrategy.d.ts +41 -0
  16. package/dist/consumer/GroupSequencer.d.ts +57 -0
  17. package/dist/consumer/HeartbeatManager.d.ts +65 -0
  18. package/dist/consumer/JobExecutor.d.ts +61 -0
  19. package/dist/consumer/JobSourceGenerator.d.ts +31 -0
  20. package/dist/consumer/PollingStrategy.d.ts +42 -0
  21. package/dist/consumer/ReactiveStrategy.d.ts +41 -0
  22. package/dist/consumer/StreamingConsumer.d.ts +90 -0
  23. package/dist/consumer/index.d.ts +13 -0
  24. package/dist/consumer/types.d.ts +102 -0
  25. package/dist/drivers/BinaryJobFrame.d.ts +78 -0
  26. package/dist/drivers/BullMQDriver.d.ts +237 -0
  27. package/dist/drivers/DatabaseDriver.d.ts +131 -0
  28. package/dist/drivers/GrpcDriver.d.ts +16 -0
  29. package/dist/drivers/KafkaDriver.d.ts +161 -0
  30. package/dist/drivers/MemoryDriver.d.ts +119 -0
  31. package/dist/drivers/QueueDriver.d.ts +250 -0
  32. package/dist/drivers/RabbitMQDriver.d.ts +140 -0
  33. package/dist/drivers/RedisDriver.d.ts +328 -0
  34. package/dist/drivers/SQSDriver.d.ts +114 -0
  35. package/dist/drivers/kafka/BackpressureController.d.ts +60 -0
  36. package/dist/drivers/kafka/BatchProcessor.d.ts +50 -0
  37. package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +80 -0
  38. package/dist/drivers/kafka/ErrorCategorizer.d.ts +39 -0
  39. package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +100 -0
  40. package/dist/drivers/kafka/HeartbeatManager.d.ts +57 -0
  41. package/dist/drivers/kafka/KafkaDriver.d.ts +138 -0
  42. package/dist/drivers/kafka/KafkaMetrics.d.ts +88 -0
  43. package/dist/drivers/kafka/KafkaNotifier.d.ts +70 -0
  44. package/dist/drivers/kafka/MessageBuffer.d.ts +71 -0
  45. package/dist/drivers/kafka/OffsetTracker.d.ts +65 -0
  46. package/dist/drivers/kafka/PerformanceMonitor.d.ts +88 -0
  47. package/dist/drivers/kafka/RateLimiter.d.ts +52 -0
  48. package/dist/drivers/kafka/RebalanceHandler.d.ts +104 -0
  49. package/dist/drivers/kafka/RingBuffer.d.ts +63 -0
  50. package/dist/drivers/kafka/index.d.ts +22 -0
  51. package/dist/drivers/kafka/types.d.ts +553 -0
  52. package/dist/drivers/prepareJobForTransport.d.ts +10 -0
  53. package/dist/index.cjs +72 -7826
  54. package/dist/index.cjs.map +9 -0
  55. package/dist/index.d.ts +60 -4378
  56. package/dist/index.js +39 -7797
  57. package/dist/index.js.map +9 -0
  58. package/dist/locks/DistributedLock.d.ts +175 -0
  59. package/dist/persistence/BufferedPersistence.d.ts +130 -0
  60. package/dist/persistence/BunBufferedPersistence.d.ts +173 -0
  61. package/dist/persistence/MySQLPersistence.d.ts +134 -0
  62. package/dist/persistence/SQLitePersistence.d.ts +133 -0
  63. package/dist/serializers/BinarySerializer.d.ts +42 -0
  64. package/dist/serializers/CachedSerializer.d.ts +42 -0
  65. package/dist/serializers/CborNativeSerializer.d.ts +56 -0
  66. package/dist/serializers/ClassNameSerializer.d.ts +58 -0
  67. package/dist/serializers/JobSerializer.d.ts +33 -0
  68. package/dist/serializers/JsonSerializer.d.ts +28 -0
  69. package/dist/serializers/JsonlSerializer.d.ts +90 -0
  70. package/dist/serializers/MessagePackSerializer.d.ts +29 -0
  71. package/dist/types.d.ts +672 -0
  72. package/dist/workers/BinaryWorkerProtocol.d.ts +77 -0
  73. package/dist/workers/BunWorker.d.ts +179 -0
  74. package/dist/workers/SandboxedWorker.d.ts +132 -0
  75. package/dist/workers/WorkerFactory.d.ts +128 -0
  76. package/dist/workers/WorkerPool.d.ts +186 -0
  77. package/dist/workers/bun-job-executor.d.ts +14 -0
  78. package/dist/workers/index.d.ts +13 -0
  79. package/dist/workers/job-executor.d.ts +9 -0
  80. package/package.json +6 -4
  81. package/dist/index.d.cts +0 -4387
@@ -0,0 +1,80 @@
1
+ import { EventEmitter } from 'node:events';
2
+ import type { ConsumerLifecycleState, LifecycleEvent } from './types';
3
+ /**
4
+ * Manages consumer lifecycle state transitions and events.
5
+ *
6
+ * State transitions:
7
+ * - idle → starting → running → stopping → stopped
8
+ * - running → restarting → running
9
+ * - any state → error
10
+ *
11
+ * Emits lifecycle events on state changes for coordination with other components.
12
+ *
13
+ * @public
14
+ */
15
+ export declare class ConsumerLifecycleManager extends EventEmitter {
16
+ private currentState;
17
+ private previousState;
18
+ private lastTransitionTime;
19
+ private stateHistory;
20
+ /**
21
+ * Register a listener for lifecycle events.
22
+ * Event data: { state, previousState, timestamp, error? }
23
+ */
24
+ onStateChange(listener: (event: LifecycleEvent) => void): void;
25
+ /**
26
+ * Start the consumer (transition idle → starting → running).
27
+ */
28
+ start(): Promise<void>;
29
+ /**
30
+ * Restart the consumer (running → restarting → running).
31
+ */
32
+ restart(): Promise<void>;
33
+ /**
34
+ * Stop the consumer (running/starting → stopping → stopped).
35
+ */
36
+ stop(): Promise<void>;
37
+ /**
38
+ * Reset to idle state (for testing/reuse).
39
+ */
40
+ reset(): void;
41
+ /**
42
+ * Get current lifecycle state.
43
+ */
44
+ getState(): ConsumerLifecycleState;
45
+ /**
46
+ * Get previous lifecycle state.
47
+ */
48
+ getPreviousState(): ConsumerLifecycleState;
49
+ /**
50
+ * Get last state transition timestamp.
51
+ */
52
+ getLastTransitionTime(): number;
53
+ /**
54
+ * Get state transition history.
55
+ */
56
+ getStateHistory(): Array<{
57
+ state: ConsumerLifecycleState;
58
+ timestamp: number;
59
+ }>;
60
+ /**
61
+ * Check if consumer is in running state.
62
+ */
63
+ isRunning(): boolean;
64
+ /**
65
+ * Check if consumer is in error state.
66
+ */
67
+ isError(): boolean;
68
+ /**
69
+ * Check if consumer is stopped.
70
+ */
71
+ isStopped(): boolean;
72
+ /**
73
+ * Internal: transition to new state and emit event.
74
+ */
75
+ private transition;
76
+ /**
77
+ * Internal: transition to error state with error details.
78
+ */
79
+ private transitionError;
80
+ }
@@ -0,0 +1,39 @@
1
+ import type { ErrorCategory } from './types';
2
+ /**
3
+ * 根據錯誤訊息與型別分類 Kafka 操作錯誤。
4
+ *
5
+ * 錯誤分類:
6
+ * - `transient`(影響電路斷路器):連線失敗、逾時、網路錯誤
7
+ * - `serialization`(不影響電路):JSON parse 失敗、序列化錯誤
8
+ * - `business_logic`(不影響電路):回調拋出的一般錯誤
9
+ * - `permanent`(不影響電路):其他永久性失敗
10
+ *
11
+ * 使用者應根據錯誤分類決定是否影響電路斷路器狀態。
12
+ * 只有 `transient` 錯誤才應遞增電路的失敗計數。
13
+ *
14
+ * @public
15
+ */
16
+ export declare class ErrorCategorizer {
17
+ /**
18
+ * 將錯誤分類為標準分類之一。
19
+ *
20
+ * 分類規則(優先順序):
21
+ * 1. `transient` - ECONNREFUSED, ETIMEDOUT, ECONNRESET, 網路相關
22
+ * 2. `serialization` - SyntaxError, JSON.parse 相關
23
+ * 3. `business_logic` - 回調拋出的一般 Error(isBusinessLogic 為 true)
24
+ * 4. `permanent` - 其他所有錯誤
25
+ */
26
+ categorize(error: Error, isBusinessLogic?: boolean): ErrorCategory;
27
+ /**
28
+ * 檢查某個分類的錯誤是否應影響電路斷路器。
29
+ *
30
+ * 只有 `transient` 分類應遞增電路的失敗計數。
31
+ * 其他分類(`serialization`, `business_logic`, `permanent`)
32
+ * 應直接跳過電路斷路器邏輯。
33
+ */
34
+ shouldAffectCircuit(category: ErrorCategory): boolean;
35
+ /**
36
+ * 根據錯誤直接判斷是否應影響電路(便利方法)。
37
+ */
38
+ shouldRecordInCircuit(error: Error, isBusinessLogic?: boolean): boolean;
39
+ }
@@ -0,0 +1,100 @@
1
+ import { EventEmitter } from 'node:events';
2
+ import type { ErrorCategory, ErrorRecoveryConfig, ErrorRecoveryState } from './types';
3
+ /**
4
+ * 管理 Kafka 操作的錯誤恢復。
5
+ *
6
+ * 結合電路斷路器模式與指數退避,
7
+ * 在持續失敗時保護系統,並在恢復後自動復原。
8
+ *
9
+ * 電路狀態機:
10
+ * - CLOSED (正常) → OPEN (超過失敗閾值)
11
+ * - OPEN → HALF_OPEN (重置逾時後)
12
+ * - HALF_OPEN → CLOSED (探測成功)
13
+ * - HALF_OPEN → OPEN (探測失敗)
14
+ *
15
+ * 事件:
16
+ * - 'circuit:open' - 電路打開(失敗太多)
17
+ * - 'circuit:half-open' - 允許探測
18
+ * - 'circuit:closed' - 電路恢復
19
+ * - 'recovery:backoff' - 進入退避期
20
+ * - 'recovery:success' - 成功恢復
21
+ *
22
+ * @public
23
+ */
24
+ export declare class ErrorRecoveryManager extends EventEmitter {
25
+ private readonly failureThreshold;
26
+ private readonly resetTimeoutMs;
27
+ private readonly halfOpenMaxRequests;
28
+ private readonly initialBackoffMs;
29
+ private readonly maxBackoffMs;
30
+ private readonly backoffMultiplier;
31
+ private readonly jitter;
32
+ private readonly maxRetries;
33
+ private circuitState;
34
+ private consecutiveFailures;
35
+ private lastFailureTime;
36
+ private totalRecoveries;
37
+ private halfOpenRequests;
38
+ private resetTimer;
39
+ constructor(config?: ErrorRecoveryConfig);
40
+ /**
41
+ * 記錄操作成功。
42
+ *
43
+ * 重置連續失敗計數,如果在 HALF_OPEN 狀態則轉換到 CLOSED。
44
+ */
45
+ recordSuccess(): void;
46
+ /**
47
+ * 記錄操作失敗。
48
+ *
49
+ * 增加失敗計數,在 CLOSED 狀態超過閾值時打開電路,
50
+ * 在 HALF_OPEN 狀態失敗時重新打開電路。
51
+ *
52
+ * @param error - 失敗的錯誤物件
53
+ * @param category - 錯誤分類(可選)。如果提供且為非 transient 類別,
54
+ * 則不影響電路斷路器狀態
55
+ */
56
+ recordFailure(error: Error, category?: ErrorCategory): void;
57
+ /**
58
+ * 檢查是否可以繼續操作。
59
+ *
60
+ * - CLOSED: 總是允許
61
+ * - OPEN: 不允許
62
+ * - HALF_OPEN: 限制探測請求數量
63
+ */
64
+ canProceed(): boolean;
65
+ /**
66
+ * 計算當前退避延遲。
67
+ *
68
+ * 使用指數退避: delay = min(initial * multiplier^attempt, maxBackoff)
69
+ * 抖動: delay * (0.5 + random * 0.5)
70
+ */
71
+ getBackoffDelay(): number;
72
+ /**
73
+ * 等待退避期間。
74
+ */
75
+ waitForBackoff(): Promise<void>;
76
+ /**
77
+ * 返回當前錯誤恢復狀態快照(不可變)。
78
+ */
79
+ getState(): ErrorRecoveryState;
80
+ /**
81
+ * 強制重置為 CLOSED 狀態。
82
+ */
83
+ reset(): void;
84
+ /**
85
+ * 清理所有計時器。
86
+ */
87
+ destroy(): void;
88
+ /**
89
+ * 檢查是否已達最大重試次數。
90
+ */
91
+ hasExceededMaxRetries(): boolean;
92
+ /**
93
+ * 內部:執行狀態轉換並發射事件。
94
+ */
95
+ private transitionTo;
96
+ /**
97
+ * 內部:排程從 OPEN 到 HALF_OPEN 的轉換。
98
+ */
99
+ private scheduleHalfOpen;
100
+ }
@@ -0,0 +1,57 @@
1
+ import { EventEmitter } from 'node:events';
2
+ import type { HeartbeatConfig, HeartbeatStatus } from './types';
3
+ /**
4
+ * 管理 Kafka 消費者的心跳偵測。
5
+ *
6
+ * 定期檢查消費者是否活躍,如果連續遺漏心跳達到閾值,
7
+ * 則發出 'stale' 事件通知上層元件。
8
+ *
9
+ * 事件:
10
+ * - 'heartbeat': 每次心跳檢查時發出
11
+ * - 'stale': 當遺漏計數達到 maxMissed 時發出
12
+ *
13
+ * @public
14
+ */
15
+ export declare class HeartbeatManager extends EventEmitter {
16
+ private readonly interval;
17
+ private readonly sessionTimeout;
18
+ private readonly maxMissed;
19
+ private consumerId;
20
+ private queues;
21
+ private lastHeartbeat;
22
+ private missedCount;
23
+ private startTime;
24
+ private running;
25
+ private checkTimer;
26
+ constructor(config?: HeartbeatConfig);
27
+ /**
28
+ * 開始心跳迴圈。
29
+ *
30
+ * 冪等操作:如果已經在執行中,不會重複啟動。
31
+ */
32
+ start(consumerId: string, queues: string[]): Promise<void>;
33
+ /**
34
+ * 停止心跳迴圈並清理計時器。
35
+ *
36
+ * 冪等操作:重複呼叫不會出錯。
37
+ */
38
+ stop(): Promise<void>;
39
+ /**
40
+ * 手動心跳(重置遺漏計數)。
41
+ *
42
+ * 由外部元件在成功處理訊息後呼叫。
43
+ */
44
+ beat(): void;
45
+ /**
46
+ * 返回當前心跳狀態快照。
47
+ */
48
+ getStatus(): HeartbeatStatus;
49
+ /**
50
+ * 註冊心跳狀態變更監聽器。
51
+ */
52
+ onStateChange(listener: (status: HeartbeatStatus) => void): void;
53
+ /**
54
+ * 內部:定期檢查心跳狀態。
55
+ */
56
+ private check;
57
+ }
@@ -0,0 +1,138 @@
1
+ import type { JobPushOptions, QueueStats, SerializedJob, TopicOptions } from '../../types';
2
+ import type { QueueDriver } from '../QueueDriver';
3
+ import type { KafkaDriverFullConfig, KafkaDriverMetrics, PerformanceSnapshot, SerializationErrorRecord, SubscribeOptions } from './types';
4
+ /**
5
+ * Kafka-native queue driver implementing the QueueDriver interface.
6
+ *
7
+ * Bridges Kafka's push model with QueueDriver's pull model using:
8
+ * - MessageBuffer (FIFO buffering from Kafka consumer)
9
+ * - OffsetTracker (at-least-once semantic with continuous-ack)
10
+ * - KafkaNotifier (ReactiveStrategy integration)
11
+ *
12
+ * @public
13
+ */
14
+ export declare class KafkaDriver implements QueueDriver {
15
+ private producer;
16
+ private consumer;
17
+ private admin;
18
+ private readonly buffer;
19
+ private readonly offsetTracker;
20
+ private readonly notifier;
21
+ private readonly lifecycleManager;
22
+ private readonly backpressure;
23
+ private readonly heartbeatManager;
24
+ private readonly metrics;
25
+ private readonly errorRecovery;
26
+ private readonly rebalanceHandler;
27
+ private readonly batchProcessor;
28
+ private readonly performanceMonitor;
29
+ private readonly errorCategorizer;
30
+ private readonly subscribedTopics;
31
+ private readonly knownQueues;
32
+ private readonly dlqBuffer;
33
+ private readonly messageIdToMeta;
34
+ private consumerRunning;
35
+ private offsetCommitTimer;
36
+ private subscriptionCallbacks;
37
+ private readonly topicToMessageIds;
38
+ private readonly serializationCache;
39
+ private readonly serializationDlq;
40
+ private readonly partitionSerializationErrors;
41
+ private readonly callbackFailureCounts;
42
+ private dlqRetryTimer;
43
+ private readonly config;
44
+ constructor(config: KafkaDriverFullConfig);
45
+ private ensureProducer;
46
+ push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void>;
47
+ /**
48
+ * Serialize a job with WeakMap caching.
49
+ * Avoids re-serializing the same object reference (e.g. retries).
50
+ */
51
+ private serializeJob;
52
+ pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
53
+ private handleIncomingMessage;
54
+ private ensureConsumer;
55
+ private ensureConsumerForTopic;
56
+ private restartConsumer;
57
+ private startConsumerLoop;
58
+ pop(queue: string): Promise<SerializedJob | null>;
59
+ popBlocking(queues: string | string[], timeout: number): Promise<SerializedJob | null>;
60
+ popMany(queue: string, count: number): Promise<SerializedJob[]>;
61
+ subscribe(queue: string, callback: (job: SerializedJob) => Promise<void>, options?: SubscribeOptions): Promise<void>;
62
+ /**
63
+ * Internal method to run the subscription message processing loop.
64
+ */
65
+ private runSubscription;
66
+ complete(_queue: string, job: SerializedJob): Promise<void>;
67
+ acknowledge(messageId: string): Promise<void>;
68
+ fail(queue: string, job: SerializedJob): Promise<void>;
69
+ private startOffsetCommitLoop;
70
+ private commitOffsets;
71
+ private ensureAdmin;
72
+ createTopic(topic: string, options?: TopicOptions): Promise<void>;
73
+ deleteTopic(topic: string): Promise<void>;
74
+ getQueues(): Promise<string[]>;
75
+ size(queue: string): Promise<number>;
76
+ stats(queue: string): Promise<QueueStats>;
77
+ /**
78
+ * Get full driver metrics snapshot.
79
+ */
80
+ getMetrics(): KafkaDriverMetrics;
81
+ /**
82
+ * Get aggregated performance snapshot.
83
+ */
84
+ getPerformanceSnapshot(): PerformanceSnapshot;
85
+ /**
86
+ * Get performance history.
87
+ */
88
+ getPerformanceHistory(count?: number): PerformanceSnapshot[];
89
+ /**
90
+ * Generate a performance report string.
91
+ */
92
+ getPerformanceReport(): string;
93
+ /**
94
+ * 取得序列化錯誤 DLQ 緩衝區的不可變副本。
95
+ */
96
+ getSerializationDlq(): readonly SerializationErrorRecord[];
97
+ /**
98
+ * 取得每個分區的序列化錯誤計數。
99
+ */
100
+ getPartitionSerializationErrors(): ReadonlyMap<string, number>;
101
+ /**
102
+ * 取得每個佇列的回呼連續失敗計數。
103
+ */
104
+ getCallbackFailureCounts(): ReadonlyMap<string, number>;
105
+ /**
106
+ * 取得 DLQ 緩衝區統計資訊。
107
+ */
108
+ getDlqStats(): {
109
+ totalBuffered: number;
110
+ perTopic: Record<string, number>;
111
+ retryEnabled: boolean;
112
+ retryIntervalMs: number;
113
+ };
114
+ /**
115
+ * 啟動 DLQ 重試迴圈,定期嘗試將緩衝區中的失敗訊息重新發送至 Kafka。
116
+ */
117
+ startDlqRetryLoop(): void;
118
+ /**
119
+ * 嘗試將所有緩衝的 DLQ 訊息重新發送至 Kafka。
120
+ * 成功發送的訊息會從緩衝區中移除。
121
+ */
122
+ flushDlqBuffer(): Promise<{
123
+ flushed: number;
124
+ remaining: number;
125
+ }>;
126
+ clear(queue: string): Promise<void>;
127
+ /**
128
+ * Remove message tracking from both primary and reverse indexes.
129
+ */
130
+ private removeMessageTracking;
131
+ enableNotifications(): Promise<void>;
132
+ disableNotifications(): Promise<void>;
133
+ onNotify(queues: string | string[], callback: (queue: string) => Promise<void>): Promise<void>;
134
+ getFailed(queue: string, start?: number, end?: number): Promise<SerializedJob[]>;
135
+ clearFailed(queue: string): Promise<void>;
136
+ retryFailed(queue: string, count?: number): Promise<number>;
137
+ disconnect(): Promise<void>;
138
+ }
@@ -0,0 +1,88 @@
1
+ import type { KafkaDriverMetrics, MetricsConfig } from './types';
2
+ /**
3
+ * Kafka driver metrics collector.
4
+ *
5
+ * Tracks throughput, latency, errors, buffer utilization, and rate limit statistics.
6
+ * Uses a circular buffer for latency histogram to prevent unbounded memory growth.
7
+ *
8
+ * Performance optimizations (Phase 6E-2):
9
+ * - Bucket counter throughput: O(buckets) snapshot instead of O(messages) timestamp filter
10
+ * - No per-message timestamp storage: constant memory for throughput tracking
11
+ * - recordMessage() hot path: O(1) bucket increment + circular buffer write
12
+ *
13
+ * @public
14
+ */
15
+ export declare class KafkaMetrics {
16
+ private readonly enabled;
17
+ private readonly histogramSize;
18
+ private readonly latencyBuffer;
19
+ private latencyIndex;
20
+ private latencyCount;
21
+ private readonly queueBuckets;
22
+ private readonly queueCounts;
23
+ private readonly errorCounts;
24
+ private readonly rateLimitCounts;
25
+ private rateLimitTotalAllowed;
26
+ private rateLimitTotalDenied;
27
+ private readonly bufferSizes;
28
+ private bufferCapacity;
29
+ private totalProcessed;
30
+ private totalFailed;
31
+ private inFlight;
32
+ private readonly lagMap;
33
+ constructor(config?: MetricsConfig);
34
+ /**
35
+ * 記錄已處理訊息(含延遲)。
36
+ * Hot path: O(1) 桶遞增 + 圓形緩衝區寫入。
37
+ */
38
+ recordMessage(queue: string, latencyMs: number): void;
39
+ /**
40
+ * 記錄錯誤(按類型追蹤)。
41
+ */
42
+ recordError(type: 'serialization' | 'callback' | 'connection' | 'timeout'): void;
43
+ /**
44
+ * 記錄速率限制決定。
45
+ */
46
+ recordRateLimitHit(queue: string, allowed: boolean): void;
47
+ /**
48
+ * 更新緩衝區大小(由外部呼叫以追蹤利用率)。
49
+ */
50
+ updateBufferSize(queue: string, size: number, capacity: number): void;
51
+ /**
52
+ * 更新 consumer lag。
53
+ */
54
+ updateLag(topicPartition: string, lag: number): void;
55
+ /**
56
+ * 更新 in-flight 計數。
57
+ */
58
+ setInFlight(count: number): void;
59
+ /**
60
+ * 計算並返回當前指標快照(不可變)。
61
+ * 吞吐量計算: O(queues * BUCKET_COUNT) 取代 O(total_messages)。
62
+ */
63
+ getSnapshot(): KafkaDriverMetrics;
64
+ /**
65
+ * 清除所有計數器。
66
+ */
67
+ reset(): void;
68
+ /**
69
+ * 從圓形緩衝區取出有效延遲資料。
70
+ */
71
+ private getLatencyData;
72
+ /**
73
+ * 計算百分位數(輸入必須已排序)。
74
+ */
75
+ private calculatePercentile;
76
+ /**
77
+ * 遞增當前時間桶的計數。O(1) 操作。
78
+ *
79
+ * 桶結構: 固定 BUCKET_COUNT 個桶,每桶代表 BUCKET_DURATION_MS 的時間段。
80
+ * 當時間推進到新桶時,清零已過期的桶。
81
+ */
82
+ private incrementBucket;
83
+ /**
84
+ * 計算每佇列吞吐量(messages/second)。
85
+ * O(queues * BUCKET_COUNT) - 固定成本,不隨訊息數增長。
86
+ */
87
+ private calculateThroughput;
88
+ }
@@ -0,0 +1,70 @@
1
+ import { EventEmitter } from 'node:events';
2
+ /**
3
+ * Kafka 通知橋接器。
4
+ *
5
+ * 將 Kafka Consumer 的訊息到達事件轉換為
6
+ * QueueDriver.onNotify() 需要的通知回呼。
7
+ * 用於整合 ReactiveStrategy。
8
+ *
9
+ * @public
10
+ */
11
+ export declare class KafkaNotifier extends EventEmitter {
12
+ private readonly callbacks;
13
+ private enabled;
14
+ /**
15
+ * 啟用通知。
16
+ *
17
+ * 允許 notify() 呼叫觸發已註冊的回呼。
18
+ */
19
+ enable(): void;
20
+ /**
21
+ * 停用通知。
22
+ *
23
+ * 呼叫 notify() 時將不觸發任何回呼。
24
+ */
25
+ disable(): void;
26
+ /**
27
+ * 是否已啟用。
28
+ *
29
+ * @returns 是否已啟用
30
+ */
31
+ isEnabled(): boolean;
32
+ /**
33
+ * 註冊通知回呼。
34
+ *
35
+ * @param queues 佇列名稱陣列
36
+ * @param callback 當訊息到達時呼叫的回呼函式
37
+ */
38
+ registerCallback(queues: string[], callback: (queue: string) => Promise<void>): void;
39
+ /**
40
+ * 註冊訊息到達事件監聽器。
41
+ *
42
+ * @param callback 當訊息到達時呼叫的回呼函式
43
+ */
44
+ onMessageArrived(callback: (event: {
45
+ queue: string;
46
+ count: number;
47
+ }) => void): void;
48
+ /**
49
+ * 觸發通知(由 Consumer 的 eachMessage 呼叫)。
50
+ *
51
+ * 執行已註冊的所有回呼,並發出 'notify' 事件。
52
+ * 如果回呼拋出錯誤,不會中斷其他回呼的執行。
53
+ *
54
+ * @param queue 佇列名稱
55
+ */
56
+ notify(queue: string): void;
57
+ /**
58
+ * 觸發帶有數量的通知。
59
+ *
60
+ * @param queue 佇列名稱
61
+ * @param count 訊息數量
62
+ */
63
+ notifyWithCount(queue: string, count: number): void;
64
+ /**
65
+ * 清除所有已註冊的回呼。
66
+ *
67
+ * 用於優雅關閉時調用。
68
+ */
69
+ clearCallbacks(): void;
70
+ }
@@ -0,0 +1,71 @@
1
+ import type { BufferedMessage } from './types';
2
+ /**
3
+ * Kafka 訊息緩衝區。
4
+ *
5
+ * 橋接 Kafka 的 push 模型與 QueueDriver 的 pull(pop)模型。
6
+ * 每個 topic 維護獨立的 FIFO 緩衝區,支援容量限制和超時等待。
7
+ *
8
+ * @public
9
+ */
10
+ export declare class MessageBuffer {
11
+ private readonly maxSize;
12
+ private readonly buffers;
13
+ private readonly waiters;
14
+ constructor(maxSize?: number);
15
+ /**
16
+ * 推入一條訊息到指定 topic 的緩衝區。
17
+ *
18
+ * @param topic 主題名稱
19
+ * @param message 要推入的訊息
20
+ * @returns 是否成功推入(false 表示緩衝區已滿)
21
+ */
22
+ enqueue(topic: string, message: BufferedMessage): boolean;
23
+ /**
24
+ * 從指定 topic 取出一條訊息(FIFO),立即返回或 null。
25
+ *
26
+ * @param topic 主題名稱
27
+ * @returns 取出的訊息或 null
28
+ */
29
+ dequeue(topic: string): BufferedMessage | null;
30
+ /**
31
+ * 等待直到有訊息或超時。
32
+ *
33
+ * @param topic 主題名稱
34
+ * @param timeoutMs 超時時間(毫秒)
35
+ * @returns 取出的訊息或 null
36
+ */
37
+ dequeueBlocking(topic: string, timeoutMs: number): Promise<BufferedMessage | null>;
38
+ /**
39
+ * 批次取出。
40
+ *
41
+ * @param topic 主題名稱
42
+ * @param count 最多取出的訊息數
43
+ * @returns 取出的訊息陣列(不足時回傳可用數量)
44
+ */
45
+ dequeueMany(topic: string, count: number): BufferedMessage[];
46
+ /**
47
+ * 取得指定 topic 的緩衝數量。
48
+ *
49
+ * @param topic 主題名稱
50
+ * @returns 緩衝區中的訊息數
51
+ */
52
+ size(topic: string): number;
53
+ /**
54
+ * 清空指定 topic 的緩衝區。
55
+ *
56
+ * @param topic 主題名稱
57
+ */
58
+ clear(topic: string): void;
59
+ /**
60
+ * 清空所有緩衝區並取消所有等待者。
61
+ *
62
+ * 用於優雅關閉時調用。
63
+ */
64
+ destroy(): void;
65
+ /**
66
+ * 喚醒指定 topic 的第一個等待者。
67
+ *
68
+ * @private
69
+ */
70
+ private notifyWaiters;
71
+ }