@gravito/stream 2.0.2 → 2.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +27 -1
- package/dist/BatchConsumer.d.ts +81 -0
- package/dist/Consumer.d.ts +215 -0
- package/dist/DashboardProvider.d.ts +29 -0
- package/dist/Job.d.ts +183 -0
- package/dist/OrbitStream.d.ts +151 -0
- package/dist/QueueManager.d.ts +321 -0
- package/dist/Queueable.d.ts +91 -0
- package/dist/Scheduler.d.ts +215 -0
- package/dist/StreamEventBackend.d.ts +120 -0
- package/dist/SystemEventJob.d.ts +41 -0
- package/dist/Worker.d.ts +139 -0
- package/dist/benchmarks/PerformanceReporter.d.ts +99 -0
- package/dist/consumer/ConcurrencyGate.d.ts +55 -0
- package/dist/consumer/ConsumerStrategy.d.ts +41 -0
- package/dist/consumer/GroupSequencer.d.ts +57 -0
- package/dist/consumer/HeartbeatManager.d.ts +65 -0
- package/dist/consumer/JobExecutor.d.ts +61 -0
- package/dist/consumer/JobSourceGenerator.d.ts +31 -0
- package/dist/consumer/PollingStrategy.d.ts +42 -0
- package/dist/consumer/ReactiveStrategy.d.ts +41 -0
- package/dist/consumer/StreamingConsumer.d.ts +90 -0
- package/dist/consumer/index.d.ts +13 -0
- package/dist/consumer/types.d.ts +102 -0
- package/dist/drivers/BinaryJobFrame.d.ts +78 -0
- package/dist/drivers/BullMQDriver.d.ts +237 -0
- package/dist/drivers/DatabaseDriver.d.ts +131 -0
- package/dist/drivers/GrpcDriver.d.ts +16 -0
- package/dist/drivers/KafkaDriver.d.ts +161 -0
- package/dist/drivers/MemoryDriver.d.ts +119 -0
- package/dist/drivers/QueueDriver.d.ts +250 -0
- package/dist/drivers/RabbitMQDriver.d.ts +140 -0
- package/dist/drivers/RedisDriver.d.ts +328 -0
- package/dist/drivers/SQSDriver.d.ts +114 -0
- package/dist/drivers/kafka/BackpressureController.d.ts +60 -0
- package/dist/drivers/kafka/BatchProcessor.d.ts +50 -0
- package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +80 -0
- package/dist/drivers/kafka/ErrorCategorizer.d.ts +39 -0
- package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +100 -0
- package/dist/drivers/kafka/HeartbeatManager.d.ts +57 -0
- package/dist/drivers/kafka/KafkaDriver.d.ts +138 -0
- package/dist/drivers/kafka/KafkaMetrics.d.ts +88 -0
- package/dist/drivers/kafka/KafkaNotifier.d.ts +70 -0
- package/dist/drivers/kafka/MessageBuffer.d.ts +71 -0
- package/dist/drivers/kafka/OffsetTracker.d.ts +65 -0
- package/dist/drivers/kafka/PerformanceMonitor.d.ts +88 -0
- package/dist/drivers/kafka/RateLimiter.d.ts +52 -0
- package/dist/drivers/kafka/RebalanceHandler.d.ts +104 -0
- package/dist/drivers/kafka/RingBuffer.d.ts +63 -0
- package/dist/drivers/kafka/index.d.ts +22 -0
- package/dist/drivers/kafka/types.d.ts +553 -0
- package/dist/drivers/prepareJobForTransport.d.ts +10 -0
- package/dist/index.cjs +72 -7826
- package/dist/index.cjs.map +9 -0
- package/dist/index.d.ts +60 -4378
- package/dist/index.js +39 -7797
- package/dist/index.js.map +9 -0
- package/dist/locks/DistributedLock.d.ts +175 -0
- package/dist/persistence/BufferedPersistence.d.ts +130 -0
- package/dist/persistence/BunBufferedPersistence.d.ts +173 -0
- package/dist/persistence/MySQLPersistence.d.ts +134 -0
- package/dist/persistence/SQLitePersistence.d.ts +133 -0
- package/dist/serializers/BinarySerializer.d.ts +42 -0
- package/dist/serializers/CachedSerializer.d.ts +42 -0
- package/dist/serializers/CborNativeSerializer.d.ts +56 -0
- package/dist/serializers/ClassNameSerializer.d.ts +58 -0
- package/dist/serializers/JobSerializer.d.ts +33 -0
- package/dist/serializers/JsonSerializer.d.ts +28 -0
- package/dist/serializers/JsonlSerializer.d.ts +90 -0
- package/dist/serializers/MessagePackSerializer.d.ts +29 -0
- package/dist/types.d.ts +672 -0
- package/dist/workers/BinaryWorkerProtocol.d.ts +77 -0
- package/dist/workers/BunWorker.d.ts +179 -0
- package/dist/workers/SandboxedWorker.d.ts +132 -0
- package/dist/workers/WorkerFactory.d.ts +128 -0
- package/dist/workers/WorkerPool.d.ts +186 -0
- package/dist/workers/bun-job-executor.d.ts +14 -0
- package/dist/workers/index.d.ts +13 -0
- package/dist/workers/job-executor.d.ts +9 -0
- package/package.json +6 -4
- package/dist/index.d.cts +0 -4387
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
/**
 * Kafka offset tracker.
 *
 * Tracks the processed offset of every topic-partition and supports
 * manual commit with at-least-once semantics.
 *
 * Uses a "contiguous acknowledgement" algorithm:
 * - Offset 3 is only committed once offsets 0, 1 and 2 have all been resolved.
 * - Ensures a future consumer restart never skips unprocessed messages.
 *
 * @public
 */
export declare class OffsetTracker {
    /** topic -> partition -> highest confirmed offset */
    private readonly committed;
    /** topic -> partition -> set of pending offsets */
    private readonly pending;
    /** topic -> partition -> statistics */
    private readonly stats;
    /**
     * Mark an offset as pending.
     *
     * @param topic Topic name
     * @param partition Partition number
     * @param offset Offset value
     */
    track(topic: string, partition: number, offset: string): void;
    /**
     * Mark an offset as completed.
     *
     * @param topic Topic name
     * @param partition Partition number
     * @param offset Offset value
     */
    resolve(topic: string, partition: number, offset: string): void;
    /**
     * Get the offsets that are safe to commit (highest contiguously
     * completed values).
     *
     * Uses the contiguous acknowledgement algorithm: only returns offsets
     * that are part of an unbroken completed sequence (i.e. contiguous
     * from the start).
     *
     * @returns Array of committable offsets
     */
    getCommittableOffsets(): Array<{
        topic: string;
        partition: number;
        offset: string;
    }>;
    /**
     * Clear tracking data for the given topic.
     *
     * @param topic Topic name
     */
    clear(topic: string): void;
    /**
     * Get statistics.
     *
     * @returns Statistics object
     */
    getStats(): {
        tracked: number;
        committed: number;
        pending: number;
    };
}
|
|
@@ -0,0 +1,88 @@
|
|
|
1
|
+
import { EventEmitter } from 'node:events';
import type { BackpressureController } from './BackpressureController';
import type { ConsumerLifecycleManager } from './ConsumerLifecycleManager';
import type { ErrorRecoveryManager } from './ErrorRecoveryManager';
import type { HeartbeatManager } from './HeartbeatManager';
import type { KafkaMetrics } from './KafkaMetrics';
import type { PerformanceConfig, PerformanceSnapshot } from './types';
/**
 * Performance monitoring component.
 *
 * Periodically collects metrics from all subsystems, produces aggregated
 * snapshots, and maintains a history for trend analysis.
 *
 * Health evaluation logic:
 * - healthy: circuit CLOSED, heartbeat alive, no backpressure
 * - degraded: circuit HALF_OPEN, or backpressure active, or error rate > 5%
 * - unhealthy: circuit OPEN, or heartbeat dead, or consumer not running
 *
 * Events:
 * - 'snapshot': emitted on every snapshot collection
 * - 'health:changed': emitted when the health status changes
 *
 * @public
 */
export declare class PerformanceMonitor extends EventEmitter {
    private readonly snapshotIntervalMs;
    private readonly historySize;
    private readonly enabled;
    private readonly history;
    private lastHealth;
    private timer;
    private running;
    private metricsRef;
    private errorRecoveryRef;
    private heartbeatRef;
    private backpressureRef;
    private lifecycleRef;
    constructor(config?: PerformanceConfig);
    /**
     * Bind the components to be monitored.
     */
    bind(components: {
        metrics: KafkaMetrics;
        errorRecovery: ErrorRecoveryManager;
        heartbeat: HeartbeatManager;
        backpressure: BackpressureController;
        lifecycle: ConsumerLifecycleManager;
    }): void;
    /**
     * Start periodic snapshot collection.
     */
    start(): void;
    /**
     * Stop snapshot collection and clean up the timer.
     */
    stop(): void;
    /**
     * Manually trigger a single snapshot collection.
     */
    collectSnapshot(): PerformanceSnapshot;
    /**
     * Get the most recent N historical snapshots (immutable copy).
     */
    getHistory(count?: number): PerformanceSnapshot[];
    /**
     * Get the latest snapshot.
     */
    getLatestSnapshot(): PerformanceSnapshot | null;
    /**
     * Generate a textual report summary.
     */
    generateReport(): string;
    /**
     * Whether the monitor is currently running.
     */
    isRunning(): boolean;
    /**
     * Clear the history and stop.
     */
    destroy(): void;
    /**
     * Internal: evaluate overall health.
     *
     * When the consumer has not started yet (idle), an inactive heartbeat
     * is expected and not considered unhealthy. Only when the consumer is
     * started (running/restarting) does a dead heartbeat count as unhealthy.
     */
    private evaluateHealth;
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
/**
 * Sliding window rate limiter for per-queue message throughput control.
 *
 * Implements token depletion: allows max N messages within a rolling duration window.
 * Uses a sliding window counter algorithm for memory efficiency.
 *
 * @public
 */
export declare class RateLimiter {
    /** Per-queue sliding-window counter state. */
    private windowStates;
    /** Per-queue allowed/denied hit counters. */
    private hitCount;
    /**
     * Check if a message should be allowed based on rate limit config.
     * @param queue Queue name
     * @param config At most `max` messages per rolling `duration` window
     * @returns true if allowed, false if exceeds limit
     */
    check(queue: string, config: {
        max: number;
        duration: number;
    }): boolean;
    /**
     * Get current rate limit state for a queue.
     */
    getState(queue: string): {
        queue: string;
        allowed: number;
        remaining: number;
        resetAt: number;
        windowStart: number;
    };
    /**
     * Get rate limit hit statistics for a queue.
     */
    getStats(queue: string): {
        allowed: number;
        denied: number;
    };
    /**
     * Get statistics for all queues.
     */
    getAllStats(): Record<string, {
        allowed: number;
        denied: number;
    }>;
    /**
     * Reset rate limiter for a specific queue or all queues.
     */
    reset(queue?: string): void;
    /**
     * Internal: Record rate limit hit/deny.
     */
    private recordHit;
}
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
import { EventEmitter } from 'node:events';
import type { PartitionAssignment, RebalanceConfig, RebalanceEvent, RebalanceStatus } from './types';
/**
 * Callback interface: RebalanceHandler notifies external components on
 * revocation/assignment.
 */
export interface RebalanceCallbacks {
    /** Commit offsets before partitions are revoked */
    commitOffsets: () => Promise<void>;
    /** Clear buffered messages for the given partitions */
    clearPartitionBuffers: (partitions: PartitionAssignment[]) => void;
    /** Clear offset tracking for the given partitions */
    clearPartitionTracking: (partitions: PartitionAssignment[]) => void;
}
/**
 * Manages rebalance events for a Kafka consumer group.
 *
 * Coordinates state consistency across MessageBuffer, OffsetTracker and
 * ConsumerLifecycleManager during partition revocation and assignment.
 *
 * State machine:
 * - stable → revoking → assigning → stable
 * - any → error
 *
 * Events:
 * - 'rebalance' - emits a RebalanceEvent on every state change
 * - 'partitionsRevoked' - partitions were revoked
 * - 'partitionsAssigned' - partitions were assigned
 *
 * @public
 */
export declare class RebalanceHandler extends EventEmitter {
    private readonly revocationTimeoutMs;
    private readonly commitOnRevoke;
    private readonly clearBufferOnRevoke;
    private readonly maxConcurrentRebalances;
    private currentState;
    private previousState;
    private assignedPartitions;
    private totalRebalances;
    private lastRebalanceTimestamp;
    private activeRebalances;
    private callbacks;
    constructor(config?: RebalanceConfig);
    /**
     * Register rebalance callbacks.
     *
     * External components (KafkaDriver) use this to provide offset-commit
     * and buffer-clearing capabilities.
     */
    registerCallbacks(callbacks: RebalanceCallbacks): void;
    /**
     * Handle a partitions-revoked event.
     *
     * Execution order:
     * 1. Transition to the 'revoking' state
     * 2. Commit tracked offsets (if enabled)
     * 3. Clear buffers for the revoked partitions (if enabled)
     * 4. Clear tracking data for the revoked partitions
     * 5. Emit notification events
     */
    handlePartitionsRevoked(partitions: PartitionAssignment[]): Promise<void>;
    /**
     * Handle a partitions-assigned event.
     *
     * Updates the assigned-partition list and emits events.
     */
    handlePartitionsAssigned(partitions: PartitionAssignment[]): Promise<void>;
    /**
     * Run the full rebalance flow (revoke + assign).
     */
    handleRebalance(revokedPartitions: PartitionAssignment[], assignedPartitions: PartitionAssignment[]): Promise<void>;
    /**
     * Return an immutable snapshot of the current rebalance status.
     */
    getStatus(): RebalanceStatus;
    /**
     * Check whether a rebalance is in progress.
     */
    isRebalancing(): boolean;
    /**
     * Get the currently assigned partitions (immutable).
     */
    getAssignedPartitions(): PartitionAssignment[];
    /**
     * Register a listener for rebalance state changes.
     */
    onRebalance(listener: (event: RebalanceEvent) => void): void;
    /**
     * Reset to the initial state.
     */
    reset(): void;
    /**
     * Clean up resources.
     */
    destroy(): void;
    /**
     * Internal: perform a state transition and emit events.
     */
    private transitionTo;
    /**
     * Internal: run an async operation with timeout protection.
     */
    private withTimeout;
}
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
/**
 * Fixed-capacity ring buffer providing O(1) push and shift operations.
 *
 * Replaces the O(n) bottleneck of Array.shift() in high-throughput
 * message-processing scenarios.
 *
 * Internally uses a fixed-length array plus head/tail pointers to
 * implement FIFO semantics, avoiding re-indexing overhead.
 *
 * @public
 */
export declare class RingBuffer<T> {
    private readonly items;
    private readonly capacity;
    private head;
    private tail;
    private count;
    constructor(capacity: number);
    /**
     * Push an element onto the tail of the buffer.
     * @returns true if pushed successfully, false if the buffer is full
     */
    push(item: T): boolean;
    /**
     * Take an element from the head of the buffer (FIFO).
     * @returns The element, or undefined if the buffer is empty
     */
    shift(): T | undefined;
    /**
     * Look at the head element without removing it.
     * @returns The element, or undefined if the buffer is empty
     */
    peek(): T | undefined;
    /**
     * Take multiple elements from the head in one batch.
     * @param count Maximum number of elements to take
     * @returns Array of removed elements
     */
    shiftMany(count: number): T[];
    /**
     * Current number of elements in the buffer.
     */
    get length(): number;
    /**
     * Whether the buffer is empty.
     */
    get isEmpty(): boolean;
    /**
     * Whether the buffer is full.
     */
    get isFull(): boolean;
    /**
     * Maximum capacity of the buffer.
     */
    getCapacity(): number;
    /**
     * Remove all elements from the buffer.
     */
    clear(): void;
    /**
     * Convert the buffer's elements to an array (head-to-tail order,
     * without removing them).
     */
    toArray(): T[];
}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
/**
 * @gravito/stream Kafka driver module.
 *
 * Provides the production-grade Kafka queue driver implementation,
 * integrating ReactiveStrategy with the full QueueDriver interface.
 *
 * @public
 */
export { BackpressureController } from './BackpressureController';
export { BatchProcessor } from './BatchProcessor';
export { ConsumerLifecycleManager } from './ConsumerLifecycleManager';
export { ErrorCategorizer } from './ErrorCategorizer';
export { ErrorRecoveryManager } from './ErrorRecoveryManager';
export { HeartbeatManager } from './HeartbeatManager';
export { KafkaDriver } from './KafkaDriver';
export { KafkaMetrics } from './KafkaMetrics';
export { KafkaNotifier } from './KafkaNotifier';
export { MessageBuffer } from './MessageBuffer';
export { OffsetTracker } from './OffsetTracker';
export { PerformanceMonitor } from './PerformanceMonitor';
export { RebalanceHandler } from './RebalanceHandler';
export type { BufferedMessage, ConsumerLifecycleState, DlqStats, ErrorCategory, ErrorRecoveryConfig, ErrorRecoveryState, HeartbeatConfig, HeartbeatStatus, KafkaAdminClient, KafkaClientFactory, KafkaConsumerClient, KafkaDriverFullConfig, KafkaMessage, KafkaProducerClient, LifecycleEvent, MetricsConfig, PartitionAssignment, PerformanceConfig, PerformanceSnapshot, RebalanceConfig, RebalanceEvent, RebalanceState, RebalanceStatus, SerializationErrorRecord, SubscribeOptions, } from './types';
|