@gravito/stream 2.0.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/README.md +127 -285
  2. package/README.zh-TW.md +146 -13
  3. package/dist/BatchConsumer.d.ts +81 -0
  4. package/dist/Consumer.d.ts +215 -0
  5. package/dist/DashboardProvider.d.ts +20 -0
  6. package/dist/Job.d.ts +183 -0
  7. package/dist/OrbitStream.d.ts +151 -0
  8. package/dist/QueueManager.d.ts +319 -0
  9. package/dist/Queueable.d.ts +91 -0
  10. package/dist/Scheduler.d.ts +214 -0
  11. package/dist/StreamEventBackend.d.ts +114 -0
  12. package/dist/SystemEventJob.d.ts +33 -0
  13. package/dist/Worker.d.ts +139 -0
  14. package/dist/benchmarks/PerformanceReporter.d.ts +99 -0
  15. package/dist/consumer/ConcurrencyGate.d.ts +55 -0
  16. package/dist/consumer/ConsumerStrategy.d.ts +41 -0
  17. package/dist/consumer/GroupSequencer.d.ts +57 -0
  18. package/dist/consumer/HeartbeatManager.d.ts +65 -0
  19. package/dist/consumer/JobExecutor.d.ts +61 -0
  20. package/dist/consumer/JobSourceGenerator.d.ts +31 -0
  21. package/dist/consumer/PollingStrategy.d.ts +42 -0
  22. package/dist/consumer/ReactiveStrategy.d.ts +41 -0
  23. package/dist/consumer/StreamingConsumer.d.ts +88 -0
  24. package/dist/consumer/index.d.ts +13 -0
  25. package/dist/consumer/types.d.ts +102 -0
  26. package/dist/drivers/BinaryJobFrame.d.ts +78 -0
  27. package/dist/drivers/BullMQDriver.d.ts +186 -0
  28. package/dist/drivers/DatabaseDriver.d.ts +131 -0
  29. package/dist/drivers/GrpcDriver.d.ts +16 -0
  30. package/dist/drivers/KafkaDriver.d.ts +148 -0
  31. package/dist/drivers/MemoryDriver.d.ts +108 -0
  32. package/dist/drivers/QueueDriver.d.ts +250 -0
  33. package/dist/drivers/RabbitMQDriver.d.ts +102 -0
  34. package/dist/drivers/RedisDriver.d.ts +294 -0
  35. package/dist/drivers/SQSDriver.d.ts +111 -0
  36. package/dist/drivers/kafka/BackpressureController.d.ts +60 -0
  37. package/dist/drivers/kafka/BatchProcessor.d.ts +50 -0
  38. package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +80 -0
  39. package/dist/drivers/kafka/ErrorCategorizer.d.ts +39 -0
  40. package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +100 -0
  41. package/dist/drivers/kafka/HeartbeatManager.d.ts +57 -0
  42. package/dist/drivers/kafka/KafkaDriver.d.ts +138 -0
  43. package/dist/drivers/kafka/KafkaMetrics.d.ts +88 -0
  44. package/dist/drivers/kafka/KafkaNotifier.d.ts +54 -0
  45. package/dist/drivers/kafka/MessageBuffer.d.ts +71 -0
  46. package/dist/drivers/kafka/OffsetTracker.d.ts +63 -0
  47. package/dist/drivers/kafka/PerformanceMonitor.d.ts +88 -0
  48. package/dist/drivers/kafka/RateLimiter.d.ts +52 -0
  49. package/dist/drivers/kafka/RebalanceHandler.d.ts +104 -0
  50. package/dist/drivers/kafka/RingBuffer.d.ts +63 -0
  51. package/dist/drivers/kafka/index.d.ts +22 -0
  52. package/dist/drivers/kafka/types.d.ts +553 -0
  53. package/dist/drivers/prepareJobForTransport.d.ts +10 -0
  54. package/dist/index.cjs +6274 -3777
  55. package/dist/index.cjs.map +71 -0
  56. package/dist/index.d.ts +60 -2233
  57. package/dist/index.js +6955 -4446
  58. package/dist/index.js.map +71 -0
  59. package/dist/locks/DistributedLock.d.ts +175 -0
  60. package/dist/persistence/BufferedPersistence.d.ts +130 -0
  61. package/dist/persistence/BunBufferedPersistence.d.ts +173 -0
  62. package/dist/persistence/MySQLPersistence.d.ts +134 -0
  63. package/dist/persistence/SQLitePersistence.d.ts +133 -0
  64. package/dist/serializers/BinarySerializer.d.ts +42 -0
  65. package/dist/serializers/CachedSerializer.d.ts +38 -0
  66. package/dist/serializers/CborNativeSerializer.d.ts +56 -0
  67. package/dist/serializers/ClassNameSerializer.d.ts +58 -0
  68. package/dist/serializers/JobSerializer.d.ts +33 -0
  69. package/dist/serializers/JsonSerializer.d.ts +28 -0
  70. package/dist/serializers/JsonlSerializer.d.ts +90 -0
  71. package/dist/serializers/MessagePackSerializer.d.ts +29 -0
  72. package/dist/types.d.ts +653 -0
  73. package/dist/workers/BinaryWorkerProtocol.d.ts +77 -0
  74. package/dist/workers/BunWorker.d.ts +179 -0
  75. package/dist/workers/SandboxedWorker.d.ts +132 -0
  76. package/dist/workers/WorkerFactory.d.ts +128 -0
  77. package/dist/workers/WorkerPool.d.ts +186 -0
  78. package/dist/workers/bun-job-executor.d.ts +14 -0
  79. package/dist/workers/index.d.ts +13 -0
  80. package/dist/workers/job-executor.d.ts +9 -0
  81. package/package.json +13 -6
  82. package/proto/queue.proto +101 -0
  83. package/dist/index.d.cts +0 -2242
@@ -0,0 +1,553 @@ — package/dist/drivers/kafka/types.d.ts (new file)
1
+ import type { SerializedJob } from '../../types';
2
+ /**
3
+ * 錯誤分類,用於決定是否影響電路斷路器。
4
+ *
5
+ * - `transient`: 網路相關錯誤,應影響電路斷路器
6
+ * - `serialization`: 序列化錯誤,不影響電路
7
+ * - `business_logic`: 回調應用邏輯錯誤,不影響電路
8
+ * - `permanent`: 其他永久性錯誤,不影響電路
9
+ *
10
+ * @public
11
+ */
12
+ export type ErrorCategory = 'transient' | 'permanent' | 'serialization' | 'business_logic';
13
+ /**
14
+ * 序列化錯誤記錄,路由至 DLQ 的訊息內容。
15
+ *
16
+ * @public
17
+ */
18
+ export interface SerializationErrorRecord {
19
+ /** 主題名稱 */
20
+ topic: string;
21
+ /** 分區號 */
22
+ partition: number;
23
+ /** 訊息偏移量 */
24
+ offset: string;
25
+ /** 原始負載(字串化)*/
26
+ rawPayload: string;
27
+ /** 錯誤訊息 */
28
+ error: string;
29
+ /** 錯誤發生時的時間戳 */
30
+ timestamp: number;
31
+ /** 錯誤分類 */
32
+ category: 'serialization';
33
+ }
34
+ /**
35
+ * DLQ 統計快照。
36
+ *
37
+ * @public
38
+ */
39
+ export interface DlqStats {
40
+ /** DLQ 緩衝區中的總項目數 */
41
+ totalBuffered: number;
42
+ /** 按主題的 DLQ 項目計數 */
43
+ perTopic: Record<string, number>;
44
+ /** 上次嘗試刷新 DLQ 的時間戳 */
45
+ lastFlushAttempt: number;
46
+ /** 由於緩衝區溢位丟棄的項目計數 */
47
+ overflowCount: number;
48
+ }
49
+ /**
50
+ * Kafka client factory interface (compatible with KafkaJS).
51
+ *
52
+ * Defines the minimal API surface to allow any compatible Kafka client implementation.
53
+ *
54
+ * @public
55
+ */
56
+ export interface KafkaClientFactory {
57
+ producer: () => KafkaProducerClient;
58
+ admin: () => KafkaAdminClient;
59
+ consumer: (config: {
60
+ groupId: string;
61
+ }) => KafkaConsumerClient;
62
+ }
63
+ /**
64
+ * Kafka Producer client interface.
65
+ *
66
+ * @public
67
+ */
68
+ export interface KafkaProducerClient {
69
+ connect(): Promise<void>;
70
+ send(args: {
71
+ topic: string;
72
+ messages: Array<{
73
+ key?: string | Buffer | null;
74
+ value: string | Buffer;
75
+ partition?: number;
76
+ headers?: Record<string, string | Buffer>;
77
+ }>;
78
+ }): Promise<Array<{
79
+ topicName: string;
80
+ partition: number;
81
+ errorCode: number;
82
+ offset: string;
83
+ }>>;
84
+ disconnect(): Promise<void>;
85
+ }
86
+ /**
87
+ * Kafka Admin client interface.
88
+ *
89
+ * @public
90
+ */
91
+ export interface KafkaAdminClient {
92
+ connect(): Promise<void>;
93
+ createTopics(args: {
94
+ topics: Array<{
95
+ topic: string;
96
+ numPartitions?: number;
97
+ replicationFactor?: number;
98
+ configEntries?: Array<{
99
+ name: string;
100
+ value: string;
101
+ }>;
102
+ }>;
103
+ }): Promise<boolean>;
104
+ deleteTopics(args: {
105
+ topics: string[];
106
+ }): Promise<void>;
107
+ fetchTopicOffsets(topic: string): Promise<Array<{
108
+ partition: number;
109
+ offset: string;
110
+ high: string;
111
+ low: string;
112
+ }>>;
113
+ fetchOffsets(args: {
114
+ groupId: string;
115
+ topics: string[];
116
+ }): Promise<Array<{
117
+ topic: string;
118
+ partitions: Array<{
119
+ partition: number;
120
+ offset: string;
121
+ }>;
122
+ }>>;
123
+ listTopics(): Promise<string[]>;
124
+ disconnect(): Promise<void>;
125
+ }
126
+ /**
127
+ * Kafka Consumer client interface.
128
+ *
129
+ * @public
130
+ */
131
+ export interface KafkaConsumerClient {
132
+ connect(): Promise<void>;
133
+ subscribe(args: {
134
+ topics: string[];
135
+ fromBeginning?: boolean;
136
+ }): Promise<void>;
137
+ run(args: {
138
+ eachMessage?: (args: {
139
+ topic: string;
140
+ partition: number;
141
+ message: KafkaMessage;
142
+ }) => Promise<void>;
143
+ eachBatch?: (args: {
144
+ batch: {
145
+ topic: string;
146
+ partition: number;
147
+ messages: KafkaMessage[];
148
+ };
149
+ resolveOffset: (offset: string) => void;
150
+ heartbeat: () => Promise<void>;
151
+ isRunning: () => boolean;
152
+ commitOffsetsIfNecessary: () => Promise<void>;
153
+ }) => Promise<void>;
154
+ autoCommit?: boolean;
155
+ }): Promise<void>;
156
+ commitOffsets(offsets: Array<{
157
+ topic: string;
158
+ partition: number;
159
+ offset: string;
160
+ }>): Promise<void>;
161
+ seek(args: {
162
+ topic: string;
163
+ partition: number;
164
+ offset: string;
165
+ }): void;
166
+ pause(topics: Array<{
167
+ topic: string;
168
+ partitions?: number[];
169
+ }>): void;
170
+ resume(topics: Array<{
171
+ topic: string;
172
+ partitions?: number[];
173
+ }>): void;
174
+ disconnect(): Promise<void>;
175
+ }
176
+ /**
177
+ * Kafka message interface.
178
+ *
179
+ * @public
180
+ */
181
+ export interface KafkaMessage {
182
+ key?: Buffer | null;
183
+ value: Buffer | null;
184
+ offset: string;
185
+ timestamp?: string;
186
+ headers?: Record<string, Buffer | string>;
187
+ }
188
+ /**
189
+ * Full Kafka driver configuration.
190
+ *
191
+ * @public
192
+ */
193
+ export interface KafkaDriverFullConfig {
194
+ /** Kafka client factory (KafkaJS compatible) */
195
+ client: KafkaClientFactory;
196
+ /** Consumer Group ID */
197
+ consumerGroupId?: string;
198
+ /** Message buffer size limit (per queue) */
199
+ bufferSize?: number;
200
+ /** pop() wait timeout (milliseconds) */
201
+ popTimeout?: number;
202
+ /** Auto-create topics */
203
+ autoCreateTopics?: boolean;
204
+ /** DLQ topic suffix */
205
+ dlqSuffix?: string;
206
+ /** Auto-commit offsets */
207
+ autoCommit?: boolean;
208
+ /** Auto-commit interval (milliseconds) */
209
+ autoCommitInterval?: number;
210
+ /** Max batch size per fetch */
211
+ maxBatchSize?: number;
212
+ /** Message serializer (default: json) */
213
+ serializer?: 'json' | 'binary';
214
+ /** Maximum DLQ buffer size (default: 1000) */
215
+ maxDlqBufferSize?: number;
216
+ /** DLQ retry interval in milliseconds (default: 60000) */
217
+ dlqRetryIntervalMs?: number;
218
+ /** Whether to enable DLQ retry loop (default: true) */
219
+ dlqRetryEnabled?: boolean;
220
+ /** Callback failure circuit threshold (default: 5) */
221
+ callbackCircuitThreshold?: number;
222
+ }
223
+ /**
224
+ * Buffered Kafka message.
225
+ *
226
+ * @public
227
+ */
228
+ export interface BufferedMessage {
229
+ job: SerializedJob;
230
+ topic: string;
231
+ partition: number;
232
+ offset: string;
233
+ timestamp: number;
234
+ acknowledged: boolean;
235
+ }
236
+ /**
237
+ * Consumer lifecycle states for Phase 6C.
238
+ *
239
+ * State transitions:
240
+ * - idle → starting → running → stopping → stopped
241
+ * - running → restarting → running
242
+ * - any → error
243
+ *
244
+ * @public
245
+ */
246
+ export type ConsumerLifecycleState = 'idle' | 'starting' | 'running' | 'restarting' | 'stopping' | 'stopped' | 'error';
247
+ /**
248
+ * Configuration for subscribe() push-based consumption.
249
+ *
250
+ * @public
251
+ */
252
+ export interface SubscribeOptions {
253
+ /** Parallel callback concurrency (default: 1) */
254
+ concurrency?: number;
255
+ /** Whether to auto-acknowledge after callback completes (default: true) */
256
+ autoAcknowledge?: boolean;
257
+ /** Callback timeout in milliseconds (default: 30000) */
258
+ callbackTimeout?: number;
259
+ /** Whether to start from beginning (default: false) */
260
+ fromBeginning?: boolean;
261
+ }
262
+ /**
263
+ * Backpressure controller configuration.
264
+ *
265
+ * @public
266
+ */
267
+ export interface BackpressureConfig {
268
+ /** High watermark percentage (0-1). Pause consumer when buffer exceeds this. Default: 0.8 */
269
+ highWatermark?: number;
270
+ /** Low watermark percentage (0-1). Resume consumer when buffer drops below this. Default: 0.5 */
271
+ lowWatermark?: number;
272
+ /** Check interval in milliseconds. Default: 100 */
273
+ checkInterval?: number;
274
+ /** Maximum number of in-flight (processing) callbacks. Default: concurrency * 2 */
275
+ maxInFlight?: number;
276
+ }
277
+ /**
278
+ * Lifecycle event payload.
279
+ *
280
+ * @public
281
+ */
282
+ export interface LifecycleEvent {
283
+ state: ConsumerLifecycleState;
284
+ previousState: ConsumerLifecycleState;
285
+ timestamp: number;
286
+ error?: Error;
287
+ }
288
+ /**
289
+ * Heartbeat configuration.
290
+ * @public
291
+ */
292
+ export interface HeartbeatConfig {
293
+ /** Heartbeat interval in milliseconds. Default: 3000 */
294
+ interval?: number;
295
+ /** Session timeout in milliseconds. Default: 30000 */
296
+ sessionTimeout?: number;
297
+ /** Maximum consecutive missed heartbeats before declaring dead. Default: 3 */
298
+ maxMissed?: number;
299
+ }
300
+ /**
301
+ * Heartbeat status snapshot.
302
+ * @public
303
+ */
304
+ export interface HeartbeatStatus {
305
+ consumerId: string;
306
+ lastHeartbeat: number;
307
+ missedCount: number;
308
+ isAlive: boolean;
309
+ uptime: number;
310
+ queues: string[];
311
+ }
312
+ /**
313
+ * Kafka driver metrics snapshot.
314
+ * @public
315
+ */
316
+ export interface KafkaDriverMetrics {
317
+ /** Timestamp of this snapshot */
318
+ timestamp: number;
319
+ /** Per-queue throughput (messages/second) */
320
+ throughput: Record<string, number>;
321
+ /** Consumer lag per topic-partition */
322
+ lag: Record<string, number>;
323
+ /** Error counts by type */
324
+ errors: {
325
+ total: number;
326
+ serialization: number;
327
+ callback: number;
328
+ connection: number;
329
+ timeout: number;
330
+ };
331
+ /** Processing latency stats (milliseconds) */
332
+ latency: {
333
+ p50: number;
334
+ p95: number;
335
+ p99: number;
336
+ avg: number;
337
+ min: number;
338
+ max: number;
339
+ };
340
+ /** Buffer utilization */
341
+ buffer: {
342
+ totalSize: number;
343
+ perQueue: Record<string, number>;
344
+ utilization: number;
345
+ };
346
+ /** Rate limit stats */
347
+ rateLimits: {
348
+ totalAllowed: number;
349
+ totalDenied: number;
350
+ perQueue: Record<string, {
351
+ allowed: number;
352
+ denied: number;
353
+ }>;
354
+ };
355
+ /** In-flight processing stats */
356
+ inFlight: number;
357
+ /** Total messages processed since start */
358
+ totalProcessed: number;
359
+ /** Total messages failed since start */
360
+ totalFailed: number;
361
+ }
362
+ /**
363
+ * Metrics configuration.
364
+ * @public
365
+ */
366
+ export interface MetricsConfig {
367
+ /** Enable metrics collection. Default: true */
368
+ enabled?: boolean;
369
+ /** Collection interval in milliseconds. Default: 5000 */
370
+ collectionInterval?: number;
371
+ /** Histogram bucket count for latency tracking. Default: 100 */
372
+ histogramSize?: number;
373
+ /** Whether to track per-partition lag. Default: false */
374
+ perPartitionLag?: boolean;
375
+ }
376
+ /**
377
+ * Circuit breaker states for Kafka operations.
378
+ * @public
379
+ */
380
+ export type KafkaCircuitState = 'CLOSED' | 'OPEN' | 'HALF_OPEN';
381
+ /**
382
+ * Error recovery configuration.
383
+ * @public
384
+ */
385
+ export interface ErrorRecoveryConfig {
386
+ /** Circuit breaker failure threshold. Default: 5 */
387
+ failureThreshold?: number;
388
+ /** Circuit breaker reset timeout in ms. Default: 30000 */
389
+ resetTimeoutMs?: number;
390
+ /** Half-open max probe requests. Default: 1 */
391
+ halfOpenMaxRequests?: number;
392
+ /** Initial backoff delay in ms. Default: 1000 */
393
+ initialBackoffMs?: number;
394
+ /** Maximum backoff delay in ms. Default: 60000 */
395
+ maxBackoffMs?: number;
396
+ /** Backoff multiplier. Default: 2 */
397
+ backoffMultiplier?: number;
398
+ /** Whether to add jitter to backoff. Default: true */
399
+ jitter?: boolean;
400
+ /** Max consecutive retries before giving up. Default: 10 */
401
+ maxRetries?: number;
402
+ }
403
+ /**
404
+ * Error recovery state snapshot.
405
+ * @public
406
+ */
407
+ export interface ErrorRecoveryState {
408
+ circuitState: KafkaCircuitState;
409
+ consecutiveFailures: number;
410
+ lastFailureTime: number;
411
+ currentBackoffMs: number;
412
+ totalRecoveries: number;
413
+ isRecovering: boolean;
414
+ }
415
+ /**
416
+ * Partition assignment descriptor.
417
+ * @public
418
+ */
419
+ export interface PartitionAssignment {
420
+ topic: string;
421
+ partition: number;
422
+ }
423
+ /**
424
+ * Rebalance lifecycle states.
425
+ *
426
+ * State transitions:
427
+ * - stable → revoking → assigning → stable
428
+ * - any → error
429
+ *
430
+ * @public
431
+ */
432
+ export type RebalanceState = 'stable' | 'revoking' | 'assigning' | 'error';
433
+ /**
434
+ * Rebalance event payload emitted during rebalance lifecycle.
435
+ * @public
436
+ */
437
+ export interface RebalanceEvent {
438
+ state: RebalanceState;
439
+ previousState: RebalanceState;
440
+ timestamp: number;
441
+ /** Partitions being revoked (available during 'revoking' state) */
442
+ revokedPartitions?: PartitionAssignment[];
443
+ /** Partitions being assigned (available during 'assigning' state) */
444
+ assignedPartitions?: PartitionAssignment[];
445
+ error?: Error;
446
+ }
447
+ /**
448
+ * Rebalance handler configuration.
449
+ * @public
450
+ */
451
+ export interface RebalanceConfig {
452
+ /** Timeout for revocation processing in milliseconds. Default: 10000 */
453
+ revocationTimeoutMs?: number;
454
+ /** Whether to commit offsets before revocation. Default: true */
455
+ commitOnRevoke?: boolean;
456
+ /** Whether to clear buffer for revoked partitions. Default: true */
457
+ clearBufferOnRevoke?: boolean;
458
+ /** Maximum concurrent rebalance operations. Default: 1 */
459
+ maxConcurrentRebalances?: number;
460
+ }
461
+ /**
462
+ * Rebalance handler status snapshot.
463
+ * @public
464
+ */
465
+ export interface RebalanceStatus {
466
+ state: RebalanceState;
467
+ assignedPartitions: PartitionAssignment[];
468
+ totalRebalances: number;
469
+ lastRebalanceTimestamp: number;
470
+ isRebalancing: boolean;
471
+ }
472
+ /**
473
+ * 批次處理配置。
474
+ * @public
475
+ */
476
+ export interface BatchConfig {
477
+ /** 最大批次大小。Default: 100 */
478
+ maxBatchSize?: number;
479
+ /** 批次收集逾時(ms)。Default: 50 */
480
+ batchLingerMs?: number;
481
+ /** 批次並行度。Default: 3 */
482
+ concurrency?: number;
483
+ /** 是否啟用 producer batch 管線。Default: true */
484
+ enablePipeline?: boolean;
485
+ }
486
+ /**
487
+ * 批次處理結果。
488
+ * @public
489
+ */
490
+ export interface BatchResult {
491
+ /** 成功處理的訊息 ID */
492
+ succeeded: string[];
493
+ /** 失敗的訊息 */
494
+ failed: Array<{
495
+ id: string;
496
+ error: Error;
497
+ }>;
498
+ }
499
+ /**
500
+ * 效能監控配置。
501
+ * @public
502
+ */
503
+ export interface PerformanceConfig {
504
+ /** 快照收集間隔(毫秒)。Default: 5000 */
505
+ snapshotIntervalMs?: number;
506
+ /** 歷史快照保留數量。Default: 60 */
507
+ historySize?: number;
508
+ /** 是否啟用效能監控。Default: true */
509
+ enabled?: boolean;
510
+ }
511
+ /**
512
+ * 效能快照,聚合所有元件的狀態。
513
+ * @public
514
+ */
515
+ export interface PerformanceSnapshot {
516
+ /** 快照時間戳 */
517
+ timestamp: number;
518
+ /** 吞吐量指標(來自 KafkaMetrics) */
519
+ throughput: Record<string, number>;
520
+ /** 延遲指標(來自 KafkaMetrics) */
521
+ latency: {
522
+ p50: number;
523
+ p95: number;
524
+ p99: number;
525
+ avg: number;
526
+ };
527
+ /** 錯誤統計(來自 KafkaMetrics) */
528
+ errors: {
529
+ total: number;
530
+ serialization: number;
531
+ callback: number;
532
+ connection: number;
533
+ timeout: number;
534
+ };
535
+ /** 電路斷路器狀態(來自 ErrorRecoveryManager) */
536
+ circuitState: KafkaCircuitState;
537
+ /** 心跳狀態(來自 HeartbeatManager) */
538
+ heartbeatAlive: boolean;
539
+ /** 背壓狀態(來自 BackpressureController) */
540
+ backpressurePaused: boolean;
541
+ /** 消費者生命週期狀態(來自 ConsumerLifecycleManager) */
542
+ consumerState: ConsumerLifecycleState;
543
+ /** 緩衝區利用率 */
544
+ bufferUtilization: number;
545
+ /** 處理中的訊息數 */
546
+ inFlight: number;
547
+ /** 總處理量 */
548
+ totalProcessed: number;
549
+ /** 總失敗量 */
550
+ totalFailed: number;
551
+ /** 整體健康狀態 */
552
+ health: 'healthy' | 'degraded' | 'unhealthy';
553
+ }
@@ -0,0 +1,10 @@ — package/dist/drivers/prepareJobForTransport.d.ts (new file)
1
+ import type { SerializedJob } from '../types';
2
+ /**
3
+ * Prepares a job for transport by converting Uint8Array data to Base64 string.
4
+ *
5
+ * This ensures safe JSON serialization across all drivers (Redis, RabbitMQ, SQS, Database).
6
+ *
7
+ * @param job - The serialized job to prepare for transport.
8
+ * @returns Job with data in transportable format (string for binary/msgpack types).
9
+ */
10
+ export declare function prepareJobForTransport(job: SerializedJob): SerializedJob;