@gravito/stream 2.0.1 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. package/README.md +127 -285
  2. package/README.zh-TW.md +146 -13
  3. package/dist/BatchConsumer.d.ts +81 -0
  4. package/dist/Consumer.d.ts +215 -0
  5. package/dist/DashboardProvider.d.ts +20 -0
  6. package/dist/Job.d.ts +183 -0
  7. package/dist/OrbitStream.d.ts +151 -0
  8. package/dist/QueueManager.d.ts +319 -0
  9. package/dist/Queueable.d.ts +91 -0
  10. package/dist/Scheduler.d.ts +214 -0
  11. package/dist/StreamEventBackend.d.ts +114 -0
  12. package/dist/SystemEventJob.d.ts +33 -0
  13. package/dist/Worker.d.ts +139 -0
  14. package/dist/benchmarks/PerformanceReporter.d.ts +99 -0
  15. package/dist/consumer/ConcurrencyGate.d.ts +55 -0
  16. package/dist/consumer/ConsumerStrategy.d.ts +41 -0
  17. package/dist/consumer/GroupSequencer.d.ts +57 -0
  18. package/dist/consumer/HeartbeatManager.d.ts +65 -0
  19. package/dist/consumer/JobExecutor.d.ts +61 -0
  20. package/dist/consumer/JobSourceGenerator.d.ts +31 -0
  21. package/dist/consumer/PollingStrategy.d.ts +42 -0
  22. package/dist/consumer/ReactiveStrategy.d.ts +41 -0
  23. package/dist/consumer/StreamingConsumer.d.ts +88 -0
  24. package/dist/consumer/index.d.ts +13 -0
  25. package/dist/consumer/types.d.ts +102 -0
  26. package/dist/drivers/BinaryJobFrame.d.ts +78 -0
  27. package/dist/drivers/BullMQDriver.d.ts +186 -0
  28. package/dist/drivers/DatabaseDriver.d.ts +131 -0
  29. package/dist/drivers/GrpcDriver.d.ts +16 -0
  30. package/dist/drivers/KafkaDriver.d.ts +148 -0
  31. package/dist/drivers/MemoryDriver.d.ts +108 -0
  32. package/dist/drivers/QueueDriver.d.ts +250 -0
  33. package/dist/drivers/RabbitMQDriver.d.ts +102 -0
  34. package/dist/drivers/RedisDriver.d.ts +294 -0
  35. package/dist/drivers/SQSDriver.d.ts +111 -0
  36. package/dist/drivers/kafka/BackpressureController.d.ts +60 -0
  37. package/dist/drivers/kafka/BatchProcessor.d.ts +50 -0
  38. package/dist/drivers/kafka/ConsumerLifecycleManager.d.ts +80 -0
  39. package/dist/drivers/kafka/ErrorCategorizer.d.ts +39 -0
  40. package/dist/drivers/kafka/ErrorRecoveryManager.d.ts +100 -0
  41. package/dist/drivers/kafka/HeartbeatManager.d.ts +57 -0
  42. package/dist/drivers/kafka/KafkaDriver.d.ts +138 -0
  43. package/dist/drivers/kafka/KafkaMetrics.d.ts +88 -0
  44. package/dist/drivers/kafka/KafkaNotifier.d.ts +54 -0
  45. package/dist/drivers/kafka/MessageBuffer.d.ts +71 -0
  46. package/dist/drivers/kafka/OffsetTracker.d.ts +63 -0
  47. package/dist/drivers/kafka/PerformanceMonitor.d.ts +88 -0
  48. package/dist/drivers/kafka/RateLimiter.d.ts +52 -0
  49. package/dist/drivers/kafka/RebalanceHandler.d.ts +104 -0
  50. package/dist/drivers/kafka/RingBuffer.d.ts +63 -0
  51. package/dist/drivers/kafka/index.d.ts +22 -0
  52. package/dist/drivers/kafka/types.d.ts +553 -0
  53. package/dist/drivers/prepareJobForTransport.d.ts +10 -0
  54. package/dist/index.cjs +6274 -3777
  55. package/dist/index.cjs.map +71 -0
  56. package/dist/index.d.ts +60 -2233
  57. package/dist/index.js +6955 -4446
  58. package/dist/index.js.map +71 -0
  59. package/dist/locks/DistributedLock.d.ts +175 -0
  60. package/dist/persistence/BufferedPersistence.d.ts +130 -0
  61. package/dist/persistence/BunBufferedPersistence.d.ts +173 -0
  62. package/dist/persistence/MySQLPersistence.d.ts +134 -0
  63. package/dist/persistence/SQLitePersistence.d.ts +133 -0
  64. package/dist/serializers/BinarySerializer.d.ts +42 -0
  65. package/dist/serializers/CachedSerializer.d.ts +38 -0
  66. package/dist/serializers/CborNativeSerializer.d.ts +56 -0
  67. package/dist/serializers/ClassNameSerializer.d.ts +58 -0
  68. package/dist/serializers/JobSerializer.d.ts +33 -0
  69. package/dist/serializers/JsonSerializer.d.ts +28 -0
  70. package/dist/serializers/JsonlSerializer.d.ts +90 -0
  71. package/dist/serializers/MessagePackSerializer.d.ts +29 -0
  72. package/dist/types.d.ts +653 -0
  73. package/dist/workers/BinaryWorkerProtocol.d.ts +77 -0
  74. package/dist/workers/BunWorker.d.ts +179 -0
  75. package/dist/workers/SandboxedWorker.d.ts +132 -0
  76. package/dist/workers/WorkerFactory.d.ts +128 -0
  77. package/dist/workers/WorkerPool.d.ts +186 -0
  78. package/dist/workers/bun-job-executor.d.ts +14 -0
  79. package/dist/workers/index.d.ts +13 -0
  80. package/dist/workers/job-executor.d.ts +9 -0
  81. package/package.json +13 -6
  82. package/proto/queue.proto +101 -0
  83. package/dist/index.d.cts +0 -2242
@@ -0,0 +1,175 @@
1
+ import type { GroupRedisClient } from '../drivers/RedisDriver';
2
+ /**
3
+ * Configuration options for distributed locks.
4
+ *
5
+ * Defines the time-to-live (TTL), retry strategy, and automatic renewal behavior for a lock.
6
+ *
7
+ * @public
8
+ * @since 3.1.0
9
+ * @example
10
+ * ```typescript
11
+ * const options: LockOptions = {
12
+ * ttl: 60000, // Lock held for 60 seconds
13
+ * retryCount: 3, // Retry 3 times on failure
14
+ * retryDelay: 100, // Wait 100ms between retries
15
+ * refreshInterval: 20000 // Auto-renew every 20 seconds
16
+ * };
17
+ * ```
18
+ */
19
+ export interface LockOptions {
20
+ /**
21
+ * Time-to-live for the lock in milliseconds.
22
+ *
23
+ * The lock will automatically expire if the holder does not release or renew it
24
+ * before this duration elapses.
25
+ */
26
+ ttl: number;
27
+ /**
28
+ * Number of retry attempts if lock acquisition fails.
29
+ *
30
+ * Set to 0 to disable retries.
31
+ */
32
+ retryCount: number;
33
+ /**
34
+ * Delay between retry attempts in milliseconds.
35
+ */
36
+ retryDelay: number;
37
+ /**
38
+ * Interval for automatic lock renewal in milliseconds.
39
+ *
40
+ * If set, the lock will automatically extend its TTL every `refreshInterval`.
41
+ * Recommended value is 1/3 of the `ttl`.
42
+ *
43
+ * @optional
44
+ */
45
+ refreshInterval?: number;
46
+ }
47
+ /**
48
+ * Distributed lock implementation based on Redis (Redlock style).
49
+ *
50
+ * Provides mutual exclusion in a distributed environment, ensuring only one node
51
+ * holds a specific lock at a time. Supports automatic renewal, retry mechanisms,
52
+ * and safe release (only the holder can release).
53
+ *
54
+ * @public
55
+ * @since 3.1.0
56
+ * @example
57
+ * ```typescript
58
+ * const lock = new DistributedLock(redisClient);
59
+ *
60
+ * const acquired = await lock.acquire('my-resource', {
61
+ * ttl: 60000,
62
+ * retryCount: 3,
63
+ * retryDelay: 100,
64
+ * refreshInterval: 20000
65
+ * });
66
+ *
67
+ * if (acquired) {
68
+ * try {
69
+ * // Perform exclusive operation
70
+ * } finally {
71
+ * await lock.release('my-resource');
72
+ * }
73
+ * }
74
+ * ```
75
+ */
76
+ export declare class DistributedLock {
77
+ private client;
78
+ /**
79
+ * Unique identifier for this lock instance.
80
+ * Used to ensure only the owner can release the lock.
81
+ */
82
+ private lockId;
83
+ /**
84
+ * Timer for automatic renewal.
85
+ */
86
+ private refreshTimer;
87
+ /**
88
+ * The key of the currently held lock.
89
+ */
90
+ private currentLockKey;
91
+ /**
92
+ * Creates a DistributedLock instance.
93
+ *
94
+ * @param client - Redis client instance. Must support SET, DEL, and EVAL commands.
95
+ */
96
+ constructor(client: GroupRedisClient);
97
+ /**
98
+ * Attempts to acquire a distributed lock for the specified key.
99
+ *
100
+ * Uses Redis `SET key value EX ttl NX` for atomic acquisition.
101
+ * If the lock is held by another node, it retries according to `retryCount`.
102
+ * Upon success, if `refreshInterval` is set, automatic renewal starts.
103
+ *
104
+ * @param key - The lock key. Use a meaningful resource identifier.
105
+ * @param options - Configuration options for the lock.
106
+ * @returns `true` if the lock was acquired, `false` otherwise.
107
+ *
108
+ * @throws {Error} If the Redis client does not support the SET command.
109
+ *
110
+ * @example
111
+ * ```typescript
112
+ * const acquired = await lock.acquire('schedule:job-123', {
113
+ * ttl: 30000,
114
+ * retryCount: 5,
115
+ * retryDelay: 200
116
+ * });
117
+ *
118
+ * if (!acquired) {
119
+ * console.log('Resource is currently locked by another node');
120
+ * }
121
+ * ```
122
+ */
123
+ acquire(key: string, options: LockOptions): Promise<boolean>;
124
+ /**
125
+ * Releases the lock for the specified key.
126
+ *
127
+ * Uses a Lua script to ensure atomicity: the lock is deleted ONLY if the value matches
128
+ * this instance's `lockId`. This prevents deleting locks held by others.
129
+ * Stops the auto-renewal timer upon success.
130
+ *
131
+ * @param key - The lock key to release.
132
+ *
133
+ * @throws {Error} If the Redis client does not support the EVAL command.
134
+ *
135
+ * @example
136
+ * ```typescript
137
+ * await lock.release('schedule:job-123');
138
+ * ```
139
+ */
140
+ release(key: string): Promise<void>;
141
+ /**
142
+ * Starts the automatic renewal mechanism.
143
+ *
144
+ * Periodically extends the lock's TTL to prevent expiration during long-running tasks.
145
+ * Uses a Lua script to ensure only owned locks are renewed.
146
+ *
147
+ * @param key - The lock key.
148
+ * @param options - Lock options containing `refreshInterval`.
149
+ */
150
+ private startRefresh;
151
+ /**
152
+ * Stops the automatic renewal timer.
153
+ */
154
+ private stopRefresh;
155
+ /**
156
+ * Helper for delay.
157
+ *
158
+ * @param ms - Milliseconds to sleep.
159
+ */
160
+ private sleep;
161
+ /**
162
+ * Checks if the specified lock is currently held by this instance.
163
+ *
164
+ * @param key - The lock key.
165
+ * @returns `true` if held, `false` otherwise.
166
+ *
167
+ * @example
168
+ * ```typescript
169
+ * if (lock.isHeld('schedule:job-123')) {
170
+ * console.log('Lock is active');
171
+ * }
172
+ * ```
173
+ */
174
+ isHeld(key: string): boolean;
175
+ }
@@ -0,0 +1,130 @@
1
+ import type { PersistenceAdapter, SerializedJob } from '../types';
2
+ /**
3
+ * Buffered Persistence Wrapper.
4
+ *
5
+ * Decorates any `PersistenceAdapter` to add write buffering. Instead of writing
6
+ * to the database immediately for every event, it collects jobs and logs in memory
7
+ * and flushes them in batches. This significantly reduces database I/O for high-throughput queues.
8
+ *
9
+ * @public
10
+ * @example
11
+ * ```typescript
12
+ * const mysqlAdapter = new MySQLPersistence(db);
13
+ * const bufferedAdapter = new BufferedPersistence(mysqlAdapter, {
14
+ * maxBufferSize: 100,
15
+ * flushInterval: 500
16
+ * });
17
+ * ```
18
+ */
19
+ export declare class BufferedPersistence implements PersistenceAdapter {
20
+ private adapter;
21
+ private jobBuffer;
22
+ private logBuffer;
23
+ private flushTimer;
24
+ private maxBufferSize;
25
+ private flushInterval;
26
+ constructor(adapter: PersistenceAdapter, options?: {
27
+ maxBufferSize?: number;
28
+ flushInterval?: number;
29
+ });
30
+ /**
31
+ * Buffers a job archive request.
32
+ *
33
+ * @param queue - The queue name.
34
+ * @param job - The serialized job.
35
+ * @param status - The final job status.
36
+ */
37
+ archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
38
+ /**
39
+ * Delegates find to the underlying adapter (no buffering for reads).
40
+ */
41
+ find(queue: string, id: string): Promise<SerializedJob | null>;
42
+ /**
43
+ * Delegates list to the underlying adapter (no buffering for reads).
44
+ */
45
+ list(queue: string, options?: {
46
+ limit?: number;
47
+ offset?: number;
48
+ status?: 'completed' | 'failed' | 'waiting' | string;
49
+ jobId?: string;
50
+ startTime?: Date;
51
+ endTime?: Date;
52
+ }): Promise<SerializedJob[]>;
53
+ /**
54
+ * Archives multiple jobs directly (bypassing buffer, or flushing first).
55
+ *
56
+ * Actually, for consistency, this might just pass through.
57
+ */
58
+ archiveMany(jobs: Array<{
59
+ queue: string;
60
+ job: SerializedJob;
61
+ status: 'completed' | 'failed' | 'waiting' | string;
62
+ }>): Promise<void>;
63
+ /**
64
+ * Delegates cleanup to the underlying adapter.
65
+ */
66
+ cleanup(days: number): Promise<number>;
67
+ /**
68
+ * Flushes all buffered data to the underlying adapter.
69
+ *
70
+ * Uses `archiveMany` and `archiveLogMany` if supported by the adapter for batch efficiency.
71
+ */
72
+ flush(): Promise<void>;
73
+ /**
74
+ * Delegates count to the underlying adapter.
75
+ */
76
+ count(queue: string, options?: {
77
+ status?: 'completed' | 'failed' | 'waiting' | string;
78
+ jobId?: string;
79
+ startTime?: Date;
80
+ endTime?: Date;
81
+ }): Promise<number>;
82
+ /**
83
+ * Buffers a log message.
84
+ */
85
+ archiveLog(log: {
86
+ level: string;
87
+ message: string;
88
+ workerId: string;
89
+ queue?: string;
90
+ timestamp: Date;
91
+ }): Promise<void>;
92
+ /**
93
+ * Archives multiple logs directly.
94
+ */
95
+ archiveLogMany(logs: Array<{
96
+ level: string;
97
+ message: string;
98
+ workerId: string;
99
+ queue?: string;
100
+ timestamp: Date;
101
+ }>): Promise<void>;
102
+ /**
103
+ * Delegates listLogs to the underlying adapter.
104
+ */
105
+ listLogs(options?: {
106
+ limit?: number;
107
+ offset?: number;
108
+ level?: string;
109
+ workerId?: string;
110
+ queue?: string;
111
+ search?: string;
112
+ startTime?: Date;
113
+ endTime?: Date;
114
+ }): Promise<any[]>;
115
+ /**
116
+ * Delegates countLogs to the underlying adapter.
117
+ */
118
+ countLogs(options?: {
119
+ level?: string;
120
+ workerId?: string;
121
+ queue?: string;
122
+ search?: string;
123
+ startTime?: Date;
124
+ endTime?: Date;
125
+ }): Promise<number>;
126
+ /**
127
+ * Ensures the auto-flush timer is running.
128
+ */
129
+ private ensureFlushTimer;
130
+ }
@@ -0,0 +1,173 @@
1
+ import type { PersistenceAdapter, SerializedJob } from '../types';
2
/**
 * Configuration options for BunBufferedPersistence.
 */
export interface BunBufferedPersistenceOptions {
    /**
     * Maximum number of buffered entries before an automatic flush is triggered (default 50).
     */
    maxBufferSize?: number;
    /**
     * Interval of the periodic flush timer, in milliseconds (default 5000).
     */
    flushInterval?: number;
    /**
     * Initial capacity of the ArrayBufferSink, in bytes (default 65536).
     */
    sinkCapacity?: number;
}
19
/**
 * High-throughput buffered persistence decorator backed by Bun's ArrayBufferSink.
 *
 * In a Bun runtime, it uses the native ArrayBufferSink as the in-memory buffer,
 * writing binary data directly in CBOR format (zero string-conversion overhead),
 * with backpressure control to prevent unbounded growth.
 *
 * In non-Bun environments (Node.js, etc.) it falls back to a simple array-based
 * buffering mechanism, behaviorally equivalent to the original BufferedPersistence.
 *
 * @public
 * @example
 * ```typescript
 * const mysqlAdapter = new MySQLPersistence(db)
 * const buffered = new BunBufferedPersistence(mysqlAdapter, {
 *   maxBufferSize: 100,
 *   flushInterval: 1000,
 * })
 * await buffered.archive('email', job, 'completed')
 * await buffered.flush()
 * ```
 */
export declare class BunBufferedPersistence implements PersistenceAdapter {
    private readonly adapter;
    private jobSink;
    private logSink;
    private jobPendingCount;
    private logPendingCount;
    private flushing;
    private flushPromise;
    private jobBuffer;
    private logBuffer;
    private flushTimer;
    private readonly maxBufferSize;
    private readonly flushInterval;
    private readonly useBun;
    private readonly sinkCapacity;
    constructor(adapter: PersistenceAdapter, options?: BunBufferedPersistenceOptions);
    /**
     * Initializes the Bun ArrayBufferSinks (Bun mode only).
     * Safe to call repeatedly (sinks are rebuilt after each flush).
     */
    private initSinks;
    /**
     * Buffers a single job archive request.
     *
     * @param queue - The queue name.
     * @param job - The serialized job data.
     * @param status - The final job status.
     */
    archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
    /**
     * Buffers a single log archive request.
     */
    archiveLog(log: {
        level: string;
        message: string;
        workerId: string;
        queue?: string;
        timestamp: Date;
    }): Promise<void>;
    /**
     * Archives multiple jobs in a batch (delegates directly to the underlying
     * adapter, bypassing the buffer).
     */
    archiveMany(jobs: Array<{
        queue: string;
        job: SerializedJob;
        status: 'completed' | 'failed' | 'waiting' | string;
    }>): Promise<void>;
    /**
     * Archives multiple logs in a batch (delegates directly to the underlying
     * adapter, bypassing the buffer).
     */
    archiveLogMany(logs: Array<{
        level: string;
        message: string;
        workerId: string;
        queue?: string;
        timestamp: Date;
    }>): Promise<void>;
    /**
     * Writes all buffered data to the underlying adapter in a batch.
     *
     * Mutex semantics: only one flush executes at a time.
     * Backpressure: archive() and archiveLog() wait while a flush is in progress.
     */
    flush(): Promise<void>;
    /**
     * Internal method that performs the actual flush.
     */
    private _doFlush;
    find(queue: string, id: string): Promise<SerializedJob | null>;
    list(queue: string, options?: {
        limit?: number;
        offset?: number;
        status?: 'completed' | 'failed' | 'waiting' | string;
        jobId?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<SerializedJob[]>;
    count(queue: string, options?: {
        status?: 'completed' | 'failed' | 'waiting' | string;
        jobId?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<number>;
    cleanup(days: number): Promise<number>;
    listLogs(options?: {
        limit?: number;
        offset?: number;
        level?: string;
        workerId?: string;
        queue?: string;
        search?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<unknown[]>;
    countLogs(options?: {
        level?: string;
        workerId?: string;
        queue?: string;
        search?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<number>;
    /**
     * Ensures the periodic flush timer is running.
     */
    private ensureFlushTimer;
    /**
     * Number of jobs currently buffered (for testing and monitoring).
     */
    get pendingJobCount(): number;
    /**
     * Number of logs currently buffered (for testing and monitoring).
     */
    get pendingLogCount(): number;
    /**
     * Whether a flush is currently in progress.
     */
    get isFlushing(): boolean;
}
160
/**
 * Creates the optimal BufferedPersistence instance for the current runtime.
 *
 * In a Bun environment this automatically uses BunBufferedPersistence
 * (accelerated by ArrayBufferSink); in a Node.js environment it falls back
 * to plain array-based buffering behavior.
 *
 * Both implement the same `PersistenceAdapter` interface and are interchangeable.
 *
 * @example
 * ```typescript
 * const adapter = createBufferedPersistence(mysqlAdapter, { maxBufferSize: 100 })
 * ```
 */
export declare function createBufferedPersistence(adapter: PersistenceAdapter, options?: BunBufferedPersistenceOptions): BunBufferedPersistence;
@@ -0,0 +1,134 @@
1
+ import type { ConnectionContract } from '@gravito/atlas';
2
+ import type { PersistenceAdapter, SerializedJob } from '../types';
3
/**
 * MySQL Persistence Adapter.
 *
 * Implements the `PersistenceAdapter` interface for MySQL databases.
 * Stores job history and logs in relational tables for long-term retention and auditing.
 *
 * @public
 * @example
 * ```typescript
 * const persistence = new MySQLPersistence(dbConnection);
 * ```
 */
export declare class MySQLPersistence implements PersistenceAdapter {
    // Atlas DB connection used for all queries.
    private db;
    // Table name for archived jobs.
    private table;
    // Table name for system logs.
    private logsTable;
    /**
     * @param db - An Atlas DB instance or compatible QueryBuilder.
     * @param table - The name of the table to store archived jobs.
     * @param logsTable - The name of the table to store system logs.
     * @param options - Buffering options (Deprecated: Use BufferedPersistence wrapper instead).
     *   The underscore-prefixed `_options` parameter is accepted for backward
     *   compatibility only — presumably ignored; verify against the implementation.
     */
    constructor(db: ConnectionContract, table?: string, logsTable?: string, _options?: {
        maxBufferSize?: number;
        flushInterval?: number;
    });
    /**
     * Archives a single job.
     */
    archive(queue: string, job: SerializedJob, status: 'completed' | 'failed' | 'waiting' | string): Promise<void>;
    /**
     * Archives multiple jobs in a batch.
     */
    archiveMany(jobs: Array<{
        queue: string;
        job: SerializedJob;
        status: 'completed' | 'failed' | 'waiting' | string;
    }>): Promise<void>;
    /**
     * No-op: this adapter writes through on every call.
     * Use BufferedPersistence if flushing is needed.
     */
    flush(): Promise<void>;
    /**
     * Finds an archived job by ID.
     */
    find(queue: string, id: string): Promise<SerializedJob | null>;
    /**
     * List jobs from the archive.
     */
    list(queue: string, options?: {
        limit?: number;
        offset?: number;
        status?: 'completed' | 'failed' | 'waiting' | string;
        jobId?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<SerializedJob[]>;
    /**
     * Search jobs from the archive.
     *
     * @param query - Search string (matches ID, payload, or error).
     * @param options - Filter options.
     */
    search(query: string, options?: {
        limit?: number;
        offset?: number;
        queue?: string;
    }): Promise<SerializedJob[]>;
    /**
     * Archive a system log message.
     */
    archiveLog(log: {
        level: string;
        message: string;
        workerId: string;
        queue?: string;
        timestamp: Date;
    }): Promise<void>;
    /**
     * Archive multiple log messages in a batch.
     */
    archiveLogMany(logs: Array<{
        level: string;
        message: string;
        workerId: string;
        queue?: string;
        timestamp: Date;
    }>): Promise<void>;
    /**
     * List system logs from the archive.
     * Returns untyped rows (`any[]`) — callers should validate the shape.
     */
    listLogs(options?: {
        limit?: number;
        offset?: number;
        level?: string;
        workerId?: string;
        queue?: string;
        search?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<any[]>;
    /**
     * Count system logs in the archive.
     */
    countLogs(options?: {
        level?: string;
        workerId?: string;
        queue?: string;
        search?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<number>;
    /**
     * Remove old records from the archive.
     *
     * @param days - Retention window in days; records older than this are removed.
     * @returns The number of records removed.
     */
    cleanup(days: number): Promise<number>;
    /**
     * Count jobs in the archive.
     */
    count(queue: string, options?: {
        status?: 'completed' | 'failed' | 'waiting' | string;
        jobId?: string;
        startTime?: Date;
        endTime?: Date;
    }): Promise<number>;
    /**
     * Helper to create necessary tables if they don't exist.
     */
    setupTable(): Promise<void>;
    // Creates the archived-jobs table if missing.
    private setupJobsTable;
    // Creates the system-logs table if missing.
    private setupLogsTable;
}