@gravito/stream 2.0.1 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +127 -285
- package/README.zh-TW.md +146 -13
- package/dist/BatchConsumer.d.ts +81 -0
- package/dist/Consumer.d.ts +215 -0
- package/dist/DashboardProvider.d.ts +20 -0
- package/dist/Job.d.ts +183 -0
- package/dist/OrbitStream.d.ts +151 -0
- package/dist/QueueManager.d.ts +319 -0
- package/dist/Queueable.d.ts +91 -0
- package/dist/Scheduler.d.ts +214 -0
- package/dist/StreamEventBackend.d.ts +114 -0
- package/dist/SystemEventJob.d.ts +33 -0
- package/dist/Worker.d.ts +139 -0
- package/dist/benchmarks/PerformanceReporter.d.ts +99 -0
- package/dist/consumer/ConcurrencyGate.d.ts +55 -0
- package/dist/consumer/ConsumerStrategy.d.ts +41 -0
- package/dist/consumer/GroupSequencer.d.ts +57 -0
- package/dist/consumer/HeartbeatManager.d.ts +65 -0
- package/dist/consumer/JobExecutor.d.ts +61 -0
- package/dist/consumer/JobSourceGenerator.d.ts +31 -0
- package/dist/consumer/PollingStrategy.d.ts +42 -0
- package/dist/consumer/ReactiveStrategy.d.ts +41 -0
- package/dist/consumer/StreamingConsumer.d.ts +88 -0
- package/dist/consumer/index.d.ts +13 -0
- package/dist/consumer/types.d.ts +102 -0
- package/dist/drivers/BinaryJobFrame.d.ts +78 -0
- package/dist/drivers/BullMQDriver.d.ts +186 -0
- package/dist/drivers/DatabaseDriver.d.ts +131 -0
- package/dist/drivers/GrpcDriver.d.ts +16 -0
- package/dist/drivers/KafkaDriver.d.ts +148 -0
- package/dist/drivers/MemoryDriver.d.ts +108 -0
- package/dist/drivers/QueueDriver.d.ts +250 -0
- package/dist/drivers/RabbitMQDriver.d.ts +102 -0
- package/dist/drivers/RedisDriver.d.ts +294 -0
- package/dist/drivers/SQSDriver.d.ts +111 -0
- package/dist/drivers/kafka/BackpressureController.d.ts +60 -0
- package/dist/drivers/kafka/BatchProcessor.d.ts +50 -0
- package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +80 -0
- package/dist/drivers/kafka/ErrorCategorizer.d.ts +39 -0
- package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +100 -0
- package/dist/drivers/kafka/HeartbeatManager.d.ts +57 -0
- package/dist/drivers/kafka/KafkaDriver.d.ts +138 -0
- package/dist/drivers/kafka/KafkaMetrics.d.ts +88 -0
- package/dist/drivers/kafka/KafkaNotifier.d.ts +54 -0
- package/dist/drivers/kafka/MessageBuffer.d.ts +71 -0
- package/dist/drivers/kafka/OffsetTracker.d.ts +63 -0
- package/dist/drivers/kafka/PerformanceMonitor.d.ts +88 -0
- package/dist/drivers/kafka/RateLimiter.d.ts +52 -0
- package/dist/drivers/kafka/RebalanceHandler.d.ts +104 -0
- package/dist/drivers/kafka/RingBuffer.d.ts +63 -0
- package/dist/drivers/kafka/index.d.ts +22 -0
- package/dist/drivers/kafka/types.d.ts +553 -0
- package/dist/drivers/prepareJobForTransport.d.ts +10 -0
- package/dist/index.cjs +6274 -3777
- package/dist/index.cjs.map +71 -0
- package/dist/index.d.ts +60 -2233
- package/dist/index.js +6955 -4446
- package/dist/index.js.map +71 -0
- package/dist/locks/DistributedLock.d.ts +175 -0
- package/dist/persistence/BufferedPersistence.d.ts +130 -0
- package/dist/persistence/BunBufferedPersistence.d.ts +173 -0
- package/dist/persistence/MySQLPersistence.d.ts +134 -0
- package/dist/persistence/SQLitePersistence.d.ts +133 -0
- package/dist/serializers/BinarySerializer.d.ts +42 -0
- package/dist/serializers/CachedSerializer.d.ts +38 -0
- package/dist/serializers/CborNativeSerializer.d.ts +56 -0
- package/dist/serializers/ClassNameSerializer.d.ts +58 -0
- package/dist/serializers/JobSerializer.d.ts +33 -0
- package/dist/serializers/JsonSerializer.d.ts +28 -0
- package/dist/serializers/JsonlSerializer.d.ts +90 -0
- package/dist/serializers/MessagePackSerializer.d.ts +29 -0
- package/dist/types.d.ts +653 -0
- package/dist/workers/BinaryWorkerProtocol.d.ts +77 -0
- package/dist/workers/BunWorker.d.ts +179 -0
- package/dist/workers/SandboxedWorker.d.ts +132 -0
- package/dist/workers/WorkerFactory.d.ts +128 -0
- package/dist/workers/WorkerPool.d.ts +186 -0
- package/dist/workers/bun-job-executor.d.ts +14 -0
- package/dist/workers/index.d.ts +13 -0
- package/dist/workers/job-executor.d.ts +9 -0
- package/package.json +13 -6
- package/proto/queue.proto +101 -0
- package/dist/index.d.cts +0 -2242
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
import { EventEmitter } from 'node:events';
import type { ErrorCategory, ErrorRecoveryConfig, ErrorRecoveryState } from './types';
/**
 * Manages error recovery for Kafka operations.
 *
 * Combines the circuit-breaker pattern with exponential backoff to
 * protect the system during sustained failures and to recover
 * automatically once conditions improve.
 *
 * Circuit state machine:
 * - CLOSED (normal) → OPEN (failure threshold exceeded)
 * - OPEN → HALF_OPEN (after the reset timeout)
 * - HALF_OPEN → CLOSED (probe succeeded)
 * - HALF_OPEN → OPEN (probe failed)
 *
 * Events:
 * - 'circuit:open' - circuit opened (too many failures)
 * - 'circuit:half-open' - probing is allowed
 * - 'circuit:closed' - circuit recovered
 * - 'recovery:backoff' - entered a backoff period
 * - 'recovery:success' - recovered successfully
 *
 * @public
 */
export declare class ErrorRecoveryManager extends EventEmitter {
    private readonly failureThreshold;
    private readonly resetTimeoutMs;
    private readonly halfOpenMaxRequests;
    private readonly initialBackoffMs;
    private readonly maxBackoffMs;
    private readonly backoffMultiplier;
    private readonly jitter;
    private readonly maxRetries;
    private circuitState;
    private consecutiveFailures;
    private lastFailureTime;
    private totalRecoveries;
    private halfOpenRequests;
    private resetTimer;
    constructor(config?: ErrorRecoveryConfig);
    /**
     * Record a successful operation.
     *
     * Resets the consecutive-failure count; transitions to CLOSED when
     * currently in the HALF_OPEN state.
     */
    recordSuccess(): void;
    /**
     * Record a failed operation.
     *
     * Increments the failure count, opens the circuit when the threshold
     * is exceeded in the CLOSED state, and re-opens the circuit on a
     * failure in the HALF_OPEN state.
     *
     * @param error - The error object of the failure
     * @param category - Optional error category. When provided and the
     *   category is non-transient, the circuit-breaker state is unaffected
     */
    recordFailure(error: Error, category?: ErrorCategory): void;
    /**
     * Check whether an operation may proceed.
     *
     * - CLOSED: always allowed
     * - OPEN: not allowed
     * - HALF_OPEN: limited number of probe requests allowed
     */
    canProceed(): boolean;
    /**
     * Compute the current backoff delay.
     *
     * Exponential backoff: delay = min(initial * multiplier^attempt, maxBackoff)
     * Jitter: delay * (0.5 + random * 0.5)
     */
    getBackoffDelay(): number;
    /**
     * Wait for the current backoff period.
     */
    waitForBackoff(): Promise<void>;
    /**
     * Return an immutable snapshot of the current error-recovery state.
     */
    getState(): ErrorRecoveryState;
    /**
     * Force a reset back to the CLOSED state.
     */
    reset(): void;
    /**
     * Clear all timers.
     */
    destroy(): void;
    /**
     * Check whether the maximum retry count has been reached.
     */
    hasExceededMaxRetries(): boolean;
    /**
     * Internal: perform a state transition and emit the matching event.
     */
    private transitionTo;
    /**
     * Internal: schedule the OPEN → HALF_OPEN transition.
     */
    private scheduleHalfOpen;
}
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import { EventEmitter } from 'node:events';
import type { HeartbeatConfig, HeartbeatStatus } from './types';
/**
 * Manages heartbeat detection for a Kafka consumer.
 *
 * Periodically checks whether the consumer is alive; when the number of
 * consecutively missed heartbeats reaches the threshold, a 'stale' event
 * is emitted to notify upstream components.
 *
 * Events:
 * - 'heartbeat': emitted on every heartbeat check
 * - 'stale': emitted when the missed count reaches maxMissed
 *
 * @public
 */
export declare class HeartbeatManager extends EventEmitter {
    private readonly interval;
    private readonly sessionTimeout;
    private readonly maxMissed;
    private consumerId;
    private queues;
    private lastHeartbeat;
    private missedCount;
    private startTime;
    private running;
    private checkTimer;
    constructor(config?: HeartbeatConfig);
    /**
     * Start the heartbeat loop.
     *
     * Idempotent: does not start again if already running.
     */
    start(consumerId: string, queues: string[]): Promise<void>;
    /**
     * Stop the heartbeat loop and clear the timer.
     *
     * Idempotent: repeated calls are safe.
     */
    stop(): Promise<void>;
    /**
     * Manual heartbeat (resets the missed count).
     *
     * Called by external components after a message has been processed
     * successfully.
     */
    beat(): void;
    /**
     * Return a snapshot of the current heartbeat status.
     */
    getStatus(): HeartbeatStatus;
    /**
     * Register a listener for heartbeat status changes.
     */
    onStateChange(listener: (status: HeartbeatStatus) => void): void;
    /**
     * Internal: periodically check the heartbeat status.
     */
    private check;
}
|
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
import type { JobPushOptions, QueueStats, SerializedJob, TopicOptions } from '../../types';
import type { QueueDriver } from '../QueueDriver';
import type { KafkaDriverFullConfig, KafkaDriverMetrics, PerformanceSnapshot, SerializationErrorRecord, SubscribeOptions } from './types';
/**
 * Kafka-native queue driver implementing the QueueDriver interface.
 *
 * Bridges Kafka's push model with QueueDriver's pull model using:
 * - MessageBuffer (FIFO buffering from Kafka consumer)
 * - OffsetTracker (at-least-once semantic with continuous-ack)
 * - KafkaNotifier (ReactiveStrategy integration)
 *
 * @public
 */
export declare class KafkaDriver implements QueueDriver {
    private producer;
    private consumer;
    private admin;
    private readonly buffer;
    private readonly offsetTracker;
    private readonly notifier;
    private readonly lifecycleManager;
    private readonly backpressure;
    private readonly heartbeatManager;
    private readonly metrics;
    private readonly errorRecovery;
    private readonly rebalanceHandler;
    private readonly batchProcessor;
    private readonly performanceMonitor;
    private readonly errorCategorizer;
    private readonly subscribedTopics;
    private readonly knownQueues;
    private readonly dlqBuffer;
    private readonly messageIdToMeta;
    private consumerRunning;
    private offsetCommitTimer;
    private subscriptionCallbacks;
    private readonly topicToMessageIds;
    private readonly serializationCache;
    private readonly serializationDlq;
    private readonly partitionSerializationErrors;
    private readonly callbackFailureCounts;
    private dlqRetryTimer;
    private readonly config;
    constructor(config: KafkaDriverFullConfig);
    private ensureProducer;
    push(queue: string, job: SerializedJob, options?: JobPushOptions): Promise<void>;
    /**
     * Serialize a job with WeakMap caching.
     * Avoids re-serializing the same object reference (e.g. retries).
     */
    private serializeJob;
    pushMany(queue: string, jobs: SerializedJob[]): Promise<void>;
    private handleIncomingMessage;
    private ensureConsumer;
    private ensureConsumerForTopic;
    private restartConsumer;
    private startConsumerLoop;
    pop(queue: string): Promise<SerializedJob | null>;
    popBlocking(queues: string | string[], timeout: number): Promise<SerializedJob | null>;
    popMany(queue: string, count: number): Promise<SerializedJob[]>;
    subscribe(queue: string, callback: (job: SerializedJob) => Promise<void>, options?: SubscribeOptions): Promise<void>;
    /**
     * Internal method to run the subscription message processing loop.
     */
    private runSubscription;
    complete(_queue: string, job: SerializedJob): Promise<void>;
    acknowledge(messageId: string): Promise<void>;
    fail(queue: string, job: SerializedJob): Promise<void>;
    private startOffsetCommitLoop;
    private commitOffsets;
    private ensureAdmin;
    createTopic(topic: string, options?: TopicOptions): Promise<void>;
    deleteTopic(topic: string): Promise<void>;
    getQueues(): Promise<string[]>;
    size(queue: string): Promise<number>;
    stats(queue: string): Promise<QueueStats>;
    /**
     * Get full driver metrics snapshot.
     */
    getMetrics(): KafkaDriverMetrics;
    /**
     * Get aggregated performance snapshot.
     */
    getPerformanceSnapshot(): PerformanceSnapshot;
    /**
     * Get performance history.
     */
    getPerformanceHistory(count?: number): PerformanceSnapshot[];
    /**
     * Generate a performance report string.
     */
    getPerformanceReport(): string;
    /**
     * Get an immutable copy of the serialization-error DLQ buffer.
     */
    getSerializationDlq(): readonly SerializationErrorRecord[];
    /**
     * Get the serialization-error count per partition.
     */
    getPartitionSerializationErrors(): ReadonlyMap<string, number>;
    /**
     * Get the consecutive callback-failure count per queue.
     */
    getCallbackFailureCounts(): ReadonlyMap<string, number>;
    /**
     * Get DLQ buffer statistics.
     */
    getDlqStats(): {
        totalBuffered: number;
        perTopic: Record<string, number>;
        retryEnabled: boolean;
        retryIntervalMs: number;
    };
    /**
     * Start the DLQ retry loop, which periodically attempts to re-publish
     * buffered failed messages to Kafka.
     */
    startDlqRetryLoop(): void;
    /**
     * Attempt to re-publish all buffered DLQ messages to Kafka.
     * Messages that are sent successfully are removed from the buffer.
     */
    flushDlqBuffer(): Promise<{
        flushed: number;
        remaining: number;
    }>;
    clear(queue: string): Promise<void>;
    /**
     * Remove message tracking from both primary and reverse indexes.
     */
    private removeMessageTracking;
    enableNotifications(): Promise<void>;
    disableNotifications(): Promise<void>;
    onNotify(queues: string | string[], callback: (queue: string) => Promise<void>): Promise<void>;
    getFailed(queue: string, start?: number, end?: number): Promise<SerializedJob[]>;
    clearFailed(queue: string): Promise<void>;
    retryFailed(queue: string, count?: number): Promise<number>;
    disconnect(): Promise<void>;
}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import type { KafkaDriverMetrics, MetricsConfig } from './types';
/**
 * Kafka driver metrics collector.
 *
 * Tracks throughput, latency, errors, buffer utilization, and rate limit statistics.
 * Uses a circular buffer for latency histogram to prevent unbounded memory growth.
 *
 * Performance optimizations (Phase 6E-2):
 * - Bucket counter throughput: O(buckets) snapshot instead of O(messages) timestamp filter
 * - No per-message timestamp storage: constant memory for throughput tracking
 * - recordMessage() hot path: O(1) bucket increment + circular buffer write
 *
 * @public
 */
export declare class KafkaMetrics {
    private readonly enabled;
    private readonly histogramSize;
    private readonly latencyBuffer;
    private latencyIndex;
    private latencyCount;
    private readonly queueBuckets;
    private readonly queueCounts;
    private readonly errorCounts;
    private readonly rateLimitCounts;
    private rateLimitTotalAllowed;
    private rateLimitTotalDenied;
    private readonly bufferSizes;
    private bufferCapacity;
    private totalProcessed;
    private totalFailed;
    private inFlight;
    private readonly lagMap;
    constructor(config?: MetricsConfig);
    /**
     * Record a processed message (with latency).
     * Hot path: O(1) bucket increment + circular-buffer write.
     */
    recordMessage(queue: string, latencyMs: number): void;
    /**
     * Record an error (tracked by type).
     */
    recordError(type: 'serialization' | 'callback' | 'connection' | 'timeout'): void;
    /**
     * Record a rate-limit decision.
     */
    recordRateLimitHit(queue: string, allowed: boolean): void;
    /**
     * Update a buffer size (called externally to track utilization).
     */
    updateBufferSize(queue: string, size: number, capacity: number): void;
    /**
     * Update the consumer lag.
     */
    updateLag(topicPartition: string, lag: number): void;
    /**
     * Update the in-flight count.
     */
    setInFlight(count: number): void;
    /**
     * Compute and return an immutable snapshot of the current metrics.
     * Throughput calculation: O(queues * BUCKET_COUNT) instead of O(total_messages).
     */
    getSnapshot(): KafkaDriverMetrics;
    /**
     * Clear all counters.
     */
    reset(): void;
    /**
     * Extract valid latency data from the circular buffer.
     */
    private getLatencyData;
    /**
     * Compute a percentile (input must already be sorted).
     */
    private calculatePercentile;
    /**
     * Increment the counter of the current time bucket. O(1) operation.
     *
     * Bucket structure: a fixed BUCKET_COUNT number of buckets, each covering
     * a BUCKET_DURATION_MS time slice. When time advances into a new bucket,
     * expired buckets are zeroed.
     */
    private incrementBucket;
    /**
     * Compute per-queue throughput (messages/second).
     * O(queues * BUCKET_COUNT) - fixed cost, does not grow with message count.
     */
    private calculateThroughput;
}
|
|
@@ -0,0 +1,54 @@
|
|
|
1
|
+
import { EventEmitter } from 'node:events';
/**
 * Kafka notification bridge.
 *
 * Converts the Kafka consumer's message-arrival events into the
 * notification callbacks required by QueueDriver.onNotify().
 * Used for ReactiveStrategy integration.
 *
 * @public
 */
export declare class KafkaNotifier extends EventEmitter {
    private readonly callbacks;
    private enabled;
    /**
     * Enable notifications.
     *
     * Allows notify() calls to trigger registered callbacks.
     */
    enable(): void;
    /**
     * Disable notifications.
     *
     * Calling notify() will not trigger any callback.
     */
    disable(): void;
    /**
     * Whether notifications are enabled.
     *
     * @returns Whether notifications are enabled
     */
    isEnabled(): boolean;
    /**
     * Register a notification callback.
     *
     * @param queues Array of queue names
     * @param callback Callback invoked when a message arrives
     */
    registerCallback(queues: string[], callback: (queue: string) => Promise<void>): void;
    /**
     * Trigger a notification (called from the consumer's eachMessage).
     *
     * Executes all registered callbacks and emits a 'notify' event.
     * A callback throwing an error does not interrupt the other callbacks.
     *
     * @param queue Queue name
     */
    notify(queue: string): void;
    /**
     * Clear all registered callbacks.
     *
     * Intended to be called during graceful shutdown.
     */
    clearCallbacks(): void;
}
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import type { BufferedMessage } from './types';
/**
 * Kafka message buffer.
 *
 * Bridges Kafka's push model with QueueDriver's pull (pop) model.
 * Each topic maintains an independent FIFO buffer, with support for a
 * capacity limit and timed blocking waits.
 *
 * @public
 */
export declare class MessageBuffer {
    private readonly maxSize;
    private readonly buffers;
    private readonly waiters;
    constructor(maxSize?: number);
    /**
     * Push a message into the buffer of the given topic.
     *
     * @param topic Topic name
     * @param message Message to push
     * @returns Whether the push succeeded (false means the buffer is full)
     */
    enqueue(topic: string, message: BufferedMessage): boolean;
    /**
     * Take one message from the given topic (FIFO); returns immediately or null.
     *
     * @param topic Topic name
     * @returns The dequeued message or null
     */
    dequeue(topic: string): BufferedMessage | null;
    /**
     * Wait until a message is available or the timeout elapses.
     *
     * @param topic Topic name
     * @param timeoutMs Timeout in milliseconds
     * @returns The dequeued message or null
     */
    dequeueBlocking(topic: string, timeoutMs: number): Promise<BufferedMessage | null>;
    /**
     * Batch dequeue.
     *
     * @param topic Topic name
     * @param count Maximum number of messages to take
     * @returns Array of dequeued messages (returns what is available if fewer)
     */
    dequeueMany(topic: string, count: number): BufferedMessage[];
    /**
     * Get the buffered count for the given topic.
     *
     * @param topic Topic name
     * @returns Number of messages in the buffer
     */
    size(topic: string): number;
    /**
     * Clear the buffer of the given topic.
     *
     * @param topic Topic name
     */
    clear(topic: string): void;
    /**
     * Clear all buffers and cancel all waiters.
     *
     * Intended to be called during graceful shutdown.
     */
    destroy(): void;
    /**
     * Wake the first waiter of the given topic.
     *
     * @private
     */
    private notifyWaiters;
}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
 * Kafka offset tracker.
 *
 * Tracks the processed offsets of every topic-partition, supporting
 * manual commit and at-least-once semantics.
 *
 * Uses a "continuous acknowledgement" algorithm:
 * - Offset 3 is committed only once offsets 0, 1 and 2 have all been resolved
 * - Ensures a future consumer restart never skips unprocessed messages
 *
 * @public
 */
export declare class OffsetTracker {
    /** topic -> partition -> highest acknowledged offset */
    private readonly committed;
    /** topic -> partition -> set of pending offsets */
    private readonly pending;
    /**
     * Mark an offset as pending.
     *
     * @param topic Topic name
     * @param partition Partition number
     * @param offset Offset value
     */
    track(topic: string, partition: number, offset: string): void;
    /**
     * Mark an offset as completed.
     *
     * @param topic Topic name
     * @param partition Partition number
     * @param offset Offset value
     */
    resolve(topic: string, partition: number, offset: string): void;
    /**
     * Get the offsets that are safe to commit (highest contiguously completed).
     *
     * Continuous acknowledgement algorithm:
     * only offsets that form a contiguous completed sequence (from 0 upward)
     * are returned.
     *
     * @returns Array of committable offsets
     */
    getCommittableOffsets(): Array<{
        topic: string;
        partition: number;
        offset: string;
    }>;
    /**
     * Clear the tracking data of the given topic.
     *
     * @param topic Topic name
     */
    clear(topic: string): void;
    /**
     * Get statistics.
     *
     * @returns Statistics object
     */
    getStats(): {
        tracked: number;
        committed: number;
        pending: number;
    };
}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import { EventEmitter } from 'node:events';
import type { BackpressureController } from './BackpressureController';
import type { ConsumerLifecycleManager } from './ConsumerLifecycleManager';
import type { ErrorRecoveryManager } from './ErrorRecoveryManager';
import type { HeartbeatManager } from './HeartbeatManager';
import type { KafkaMetrics } from './KafkaMetrics';
import type { PerformanceConfig, PerformanceSnapshot } from './types';
/**
 * Performance monitoring component.
 *
 * Periodically collects metrics from all subsystems, produces aggregated
 * snapshots, and maintains a history for trend analysis.
 *
 * Health evaluation logic:
 * - healthy: circuit CLOSED, heartbeat alive, no backpressure
 * - degraded: circuit HALF_OPEN, or backpressure active, or error rate > 5%
 * - unhealthy: circuit OPEN, or heartbeat dead, or consumer not running
 *
 * Events:
 * - 'snapshot': emitted on every snapshot collection
 * - 'health:changed': emitted when the health state changes
 *
 * @public
 */
export declare class PerformanceMonitor extends EventEmitter {
    private readonly snapshotIntervalMs;
    private readonly historySize;
    private readonly enabled;
    private readonly history;
    private lastHealth;
    private timer;
    private running;
    private metricsRef;
    private errorRecoveryRef;
    private heartbeatRef;
    private backpressureRef;
    private lifecycleRef;
    constructor(config?: PerformanceConfig);
    /**
     * Bind the components to be monitored.
     */
    bind(components: {
        metrics: KafkaMetrics;
        errorRecovery: ErrorRecoveryManager;
        heartbeat: HeartbeatManager;
        backpressure: BackpressureController;
        lifecycle: ConsumerLifecycleManager;
    }): void;
    /**
     * Start periodic snapshot collection.
     */
    start(): void;
    /**
     * Stop snapshot collection and clear the timer.
     */
    stop(): void;
    /**
     * Manually trigger one snapshot collection.
     */
    collectSnapshot(): PerformanceSnapshot;
    /**
     * Get the most recent N history snapshots (immutable copy).
     */
    getHistory(count?: number): PerformanceSnapshot[];
    /**
     * Get the latest snapshot.
     */
    getLatestSnapshot(): PerformanceSnapshot | null;
    /**
     * Generate a text report summary.
     */
    generateReport(): string;
    /**
     * Whether the monitor is currently running.
     */
    isRunning(): boolean;
    /**
     * Clear the history and stop.
     */
    destroy(): void;
    /**
     * Internal: evaluate the overall health state.
     *
     * While the consumer has not started (idle), an inactive heartbeat is
     * expected and is not treated as unhealthy. Only when the consumer has
     * started (running/restarting) does a dead heartbeat count as unhealthy.
     */
    private evaluateHealth;
}
|