@delma/fylo 1.0.1 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +62 -3
- package/package.json +4 -2
- package/src/adapters/redis.ts +346 -0
- package/src/core/write-queue.ts +56 -0
- package/src/index.ts +292 -29
- package/src/types/fylo.d.ts +37 -3
- package/src/types/write-queue.ts +42 -0
- package/src/worker.ts +12 -0
- package/src/workers/write-worker.ts +119 -0
- package/tests/integration/queue.test.ts +99 -0
- package/tests/mocks/redis.ts +156 -0
package/README.md
CHANGED

@@ -1,11 +1,13 @@
 # Fylo
 
-S3-backed NoSQL document store with SQL parsing, Redis pub/sub for real-time events, and a CLI.
+S3-backed NoSQL document store with SQL parsing, Redis-backed write coordination and pub/sub for real-time events, and a CLI.
 
 Documents are stored as **S3 key paths** — not file contents. Each document produces two keys per field: a **data key** (`{ttid}/{field}/{value}`) for full-doc retrieval and an **index key** (`{field}/{value}/{ttid}`) for query lookups. This enables fast reads and filtered queries without a traditional database engine.
 
 Built for **serverless** runtimes (AWS Lambda, Cloudflare Workers) — no persistent in-memory state, lazy connections, minimal cold-start overhead.
 
+Writes are coordinated through Redis before they are flushed to S3. By default the high-level CRUD methods wait for the queued write to be processed so existing code can continue to behave synchronously. If you want fire-and-forget semantics, pass `{ wait: false }` and process queued jobs with a worker or `processQueuedWrites()`.
+
 ## Install
 
 ```bash
@@ -21,7 +23,15 @@ bun add @delma/fylo
 | `S3_SECRET_ACCESS_KEY` / `AWS_SECRET_ACCESS_KEY` | S3 credentials |
 | `S3_REGION` / `AWS_REGION` | S3 region |
 | `S3_ENDPOINT` / `AWS_ENDPOINT` | S3 endpoint (for LocalStack, MinIO, etc.) |
-| `REDIS_URL` | Redis connection URL
+| `REDIS_URL` | Redis connection URL used for pub/sub, document locks, and queued write coordination |
+| `FYLO_WRITE_MAX_ATTEMPTS` | Maximum retry attempts before a queued job is dead-lettered |
+| `FYLO_WRITE_RETRY_BASE_MS` | Base retry delay used for exponential backoff between recovery attempts |
+| `FYLO_WORKER_ID` | Optional stable identifier for a write worker process |
+| `FYLO_WORKER_BATCH_SIZE` | Number of queued jobs a worker pulls per read loop |
+| `FYLO_WORKER_BLOCK_MS` | Redis stream block time for waiting on new jobs |
+| `FYLO_WORKER_RECOVER_ON_START` | Whether the worker reclaims stale pending jobs on startup |
+| `FYLO_WORKER_RECOVER_IDLE_MS` | Minimum idle time before a pending job is reclaimed |
+| `FYLO_WORKER_STOP_WHEN_IDLE` | Exit the worker loop when no jobs are available |
 | `LOGGING` | Enable debug logging |
 | `STRICT` | Enable schema validation via CHEX |
 
@@ -69,6 +79,44 @@ const deleted = await fylo.delDocs<_user>("users", {
 await Fylo.dropCollection("users")
 ```
 
+### Queued Writes
+
+```typescript
+const fylo = new Fylo()
+
+// Default behavior waits for the queued write to finish.
+const _id = await fylo.putData("users", { name: "John Doe" })
+
+// Async mode returns the queued job immediately.
+const queued = await fylo.putData("users", { name: "Jane Doe" }, { wait: false })
+
+// Poll status if you need to track progress.
+const status = await fylo.getJobStatus(queued.jobId)
+
+// Process pending writes in-process when you are not running a separate worker.
+await fylo.processQueuedWrites()
+```
+
+When `wait: false` is used, the job is durable in Redis but the document is not visible in S3 until a worker commits it.
+
+Queued jobs that fail are left pending for recovery. Recovered jobs retry up to `FYLO_WRITE_MAX_ATTEMPTS` times before being moved to a dead-letter stream. You can inspect dead letters with `getDeadLetters()` and reclaim stale pending jobs with `processQueuedWrites(count, true)`.
+
+Operational helpers:
+
+- `getQueueStats()` returns current queue, pending, and dead-letter counts
+- `getDeadLetters()` lists exhausted jobs
+- `replayDeadLetter(streamId)` moves a dead-lettered job back into the main queue
+
+### Worker
+
+Run a dedicated write worker when you want queued writes to be flushed outside the request path:
+
+```bash
+bun run worker
+```
+
+The worker entrypoint lives at [worker.ts](./src/worker.ts) and continuously drains the Redis stream, recovers stale pending jobs on startup, and respects the retry/dead-letter settings above.
+
 ### CRUD — SQL API
 
 ```typescript
@@ -160,7 +208,9 @@ for await (const doc of Fylo.exportBulkData<_user>("users")) {
 
 ### Rollback
 
-
+`rollback()` is now a legacy escape hatch.
+
+Fylo still keeps best-effort rollback data for writes performed by the current instance. This is mainly useful for in-process failures and test workflows:
 
 ```typescript
 const fylo = new Fylo()
@@ -168,6 +218,15 @@ await fylo.putData("users", { name: "test" })
 await fylo.rollback() // undoes all writes in this instance
 ```
 
+For queued writes, prefer:
+
+- `getJobStatus()` to inspect an individual write
+- `processQueuedWrites(count, true)` to recover stale pending jobs
+- `getDeadLetters()` to inspect exhausted jobs
+- compensating writes instead of `rollback()` after a commit
+
+`rollback()` may be removed from the main queued-write path in a future major release.
+
 ### CLI
 
 ```bash
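
The README's operational helpers compose into a simple triage routine. A minimal sketch, assuming `Fylo` is imported from the package root, that the instance-level `getQueueStats()`, `getDeadLetters()`, and `replayDeadLetter()` mirror the adapter's return shapes (`streamId`, `job`, `reason`), and with `isTransient` as a purely hypothetical predicate:

```typescript
import { Fylo } from "@delma/fylo"

const fylo = new Fylo()

// Check queue depth before deciding whether to intervene.
const stats = await fylo.getQueueStats()
console.log(`queued=${stats.queued} pending=${stats.pending} dead=${stats.deadLetters}`)

// `isTransient` is a hypothetical predicate, not part of the package.
const isTransient = (reason?: string) => /timeout|ECONNRESET|ETIMEDOUT/i.test(reason ?? "")

for (const dead of await fylo.getDeadLetters()) {
  if (isTransient(dead.reason)) {
    // Requeue onto the main write stream for another round of attempts.
    await fylo.replayDeadLetter(dead.streamId)
  } else {
    console.warn(`job ${dead.job.jobId} needs manual review: ${dead.reason}`)
  }
}
```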
package/package.json
CHANGED

@@ -1,15 +1,17 @@
 {
   "name": "@delma/fylo",
"version": "1.0
+  "version": "1.1.0",
   "main": "./dist/index.js",
   "types": "./dist/types/index.d.ts",
   "bin": {
-    "fylo.query": "./dist/cli/index.js"
+    "fylo.query": "./dist/cli/index.js",
+    "fylo.worker": "./dist/worker.js"
   },
   "scripts": {
     "build": "tsc",
     "test": "bun test",
     "typecheck": "tsc --noEmit",
+    "worker": "bun run ./src/worker.ts",
     "lint": "eslint src tests",
     "format": "prettier --write src tests"
   },
package/src/adapters/redis.ts
CHANGED

@@ -1,8 +1,15 @@
 import { RedisClient } from "bun";
 import { S3 } from "./s3";
+import type { DeadLetterJob, QueueStats, StreamJobEntry, WriteJob, WriteJobStatus } from "../types/write-queue";
 
 export class Redis {
 
+    static readonly WRITE_STREAM = 'fylo:writes'
+
+    static readonly WRITE_GROUP = 'fylo-workers'
+
+    static readonly DEAD_LETTER_STREAM = 'fylo:writes:dead'
+
     private client: RedisClient
 
     private static LOGGING = process.env.LOGGING
@@ -31,6 +38,41 @@ export class Redis {
         this.client.connect()
     }
 
+    private async ensureWriteGroup() {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        try {
+            await this.client.send('XGROUP', ['CREATE', Redis.WRITE_STREAM, Redis.WRITE_GROUP, '$', 'MKSTREAM'])
+        } catch(err) {
+            if(!(err instanceof Error) || !err.message.includes('BUSYGROUP')) throw err
+        }
+    }
+
+    private static hashKey(jobId: string) {
+        return `fylo:job:${jobId}`
+    }
+
+    private static docKey(collection: string, docId: _ttid) {
+        return `fylo:doc:${collection}:${docId}`
+    }
+
+    private static lockKey(collection: string, docId: _ttid) {
+        return `fylo:lock:${collection}:${docId}`
+    }
+
+    private static parseHash(values: unknown): Record<string, string> {
+        if(!Array.isArray(values)) return {}
+
+        const parsed: Record<string, string> = {}
+
+        for(let i = 0; i < values.length; i += 2) {
+            parsed[String(values[i])] = String(values[i + 1] ?? '')
+        }
+
+        return parsed
+    }
+
     async publish(collection: string, action: 'insert' | 'delete', keyId: string | _ttid) {
 
         if(this.client.connected) {
@@ -48,6 +90,310 @@ export class Redis {
         return result === 'OK'
     }
 
+    async enqueueWrite<T extends Record<string, any>>(job: WriteJob<T>) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        await this.ensureWriteGroup()
+
+        const now = Date.now()
+        const payload = JSON.stringify(job.payload)
+
+        await this.client.send('HSET', [
+            Redis.hashKey(job.jobId),
+            'jobId', job.jobId,
+            'collection', job.collection,
+            'docId', job.docId,
+            'operation', job.operation,
+            'payload', payload,
+            'status', job.status,
+            'attempts', String(job.attempts),
+            'createdAt', String(job.createdAt),
+            'updatedAt', String(now),
+            'nextAttemptAt', String(job.nextAttemptAt ?? now)
+        ])
+
+        await this.client.send('HSET', [
+            Redis.docKey(job.collection, job.docId),
+            'status', 'queued',
+            'lastJobId', job.jobId,
+            'updatedAt', String(now)
+        ])
+
+        return await this.client.send('XADD', [
+            Redis.WRITE_STREAM,
+            '*',
+            'jobId', job.jobId,
+            'collection', job.collection,
+            'docId', job.docId,
+            'operation', job.operation
+        ])
+    }
+
+    async readWriteJobs(workerId: string, count: number = 1, blockMs: number = 1000): Promise<Array<StreamJobEntry>> {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        await this.ensureWriteGroup()
+
+        const rows = await this.client.send('XREADGROUP', [
+            'GROUP', Redis.WRITE_GROUP, workerId,
+            'COUNT', String(count),
+            'BLOCK', String(blockMs),
+            'STREAMS', Redis.WRITE_STREAM, '>'
+        ])
+
+        if(!Array.isArray(rows) || rows.length === 0) return []
+
+        const items: Array<StreamJobEntry> = []
+
+        for(const streamRow of rows as unknown[]) {
+            if(!Array.isArray(streamRow) || streamRow.length < 2) continue
+            const entries = streamRow[1]
+            if(!Array.isArray(entries)) continue
+
+            for(const entry of entries as unknown[]) {
+                if(!Array.isArray(entry) || entry.length < 2) continue
+                const streamId = String(entry[0])
+                const fields = Redis.parseHash(entry[1])
+                const job = await this.getJob(fields.jobId)
+                if(job) items.push({ streamId, job })
+            }
+        }
+
+        return items
+    }
+
+    async ackWriteJob(streamId: string) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        await this.client.send('XACK', [Redis.WRITE_STREAM, Redis.WRITE_GROUP, streamId])
+    }
+
+    async deadLetterWriteJob(streamId: string, job: WriteJob, reason?: string) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const failedAt = Date.now()
+
+        await this.client.send('XADD', [
+            Redis.DEAD_LETTER_STREAM,
+            '*',
+            'jobId', job.jobId,
+            'collection', job.collection,
+            'docId', job.docId,
+            'operation', job.operation,
+            'reason', reason ?? '',
+            'failedAt', String(failedAt)
+        ])
+
+        await this.ackWriteJob(streamId)
+    }
+
+    async claimPendingJobs(workerId: string, minIdleMs: number = 30_000, count: number = 10): Promise<Array<StreamJobEntry>> {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        await this.ensureWriteGroup()
+
+        const result = await this.client.send('XAUTOCLAIM', [
+            Redis.WRITE_STREAM,
+            Redis.WRITE_GROUP,
+            workerId,
+            String(minIdleMs),
+            '0-0',
+            'COUNT',
+            String(count)
+        ])
+
+        if(!Array.isArray(result) || result.length < 2 || !Array.isArray(result[1])) return []
+
+        const items: Array<StreamJobEntry> = []
+
+        for(const entry of result[1] as unknown[]) {
+            if(!Array.isArray(entry) || entry.length < 2) continue
+            const streamId = String(entry[0])
+            const fields = Redis.parseHash(entry[1])
+            const job = await this.getJob(fields.jobId)
+            if(job) items.push({ streamId, job })
+        }
+
+        return items
+    }
+
+    async setJobStatus(jobId: string, status: WriteJobStatus, extra: Partial<Pick<WriteJob, 'workerId' | 'error' | 'attempts' | 'nextAttemptAt'>> = {}) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const args = [
+            Redis.hashKey(jobId),
+            'status', status,
+            'updatedAt', String(Date.now())
+        ]
+
+        if(extra.workerId) args.push('workerId', extra.workerId)
+        if(extra.error) args.push('error', extra.error)
+        if(typeof extra.attempts === 'number') args.push('attempts', String(extra.attempts))
+        if(typeof extra.nextAttemptAt === 'number') args.push('nextAttemptAt', String(extra.nextAttemptAt))
+
+        await this.client.send('HSET', args)
+    }
+
+    async setDocStatus(collection: string, docId: _ttid, status: WriteJobStatus, jobId?: string) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const args = [
+            Redis.docKey(collection, docId),
+            'status', status,
+            'updatedAt', String(Date.now())
+        ]
+
+        if(jobId) args.push('lastJobId', jobId)
+
+        await this.client.send('HSET', args)
+    }
+
+    async getJob(jobId: string): Promise<WriteJob | null> {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const hash = Redis.parseHash(await this.client.send('HGETALL', [Redis.hashKey(jobId)]))
+
+        if(Object.keys(hash).length === 0) return null
+
+        return {
+            jobId: hash.jobId,
+            collection: hash.collection,
+            docId: hash.docId as _ttid,
+            operation: hash.operation as WriteJob['operation'],
+            payload: JSON.parse(hash.payload),
+            status: hash.status as WriteJobStatus,
+            attempts: Number(hash.attempts ?? 0),
+            createdAt: Number(hash.createdAt ?? 0),
+            updatedAt: Number(hash.updatedAt ?? 0),
+            nextAttemptAt: Number(hash.nextAttemptAt ?? 0) || undefined,
+            workerId: hash.workerId || undefined,
+            error: hash.error || undefined
+        }
+    }
+
+    async getDocStatus(collection: string, docId: _ttid) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const hash = Redis.parseHash(await this.client.send('HGETALL', [Redis.docKey(collection, docId)]))
+
+        return Object.keys(hash).length > 0 ? hash : null
+    }
+
+    async readDeadLetters(count: number = 10): Promise<Array<DeadLetterJob>> {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const rows = await this.client.send('XRANGE', [Redis.DEAD_LETTER_STREAM, '-', '+', 'COUNT', String(count)])
+
+        if(!Array.isArray(rows)) return []
+
+        const items: Array<DeadLetterJob> = []
+
+        for(const row of rows as unknown[]) {
+            if(!Array.isArray(row) || row.length < 2) continue
+            const streamId = String(row[0])
+            const fields = Redis.parseHash(row[1])
+            const job = await this.getJob(fields.jobId)
+
+            if(job) {
+                items.push({
+                    streamId,
+                    job,
+                    reason: fields.reason || undefined,
+                    failedAt: Number(fields.failedAt ?? 0)
+                })
+            }
+        }
+
+        return items
+    }
+
+    async replayDeadLetter(streamId: string): Promise<WriteJob | null> {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const rows = await this.client.send('XRANGE', [Redis.DEAD_LETTER_STREAM, streamId, streamId, 'COUNT', '1'])
+
+        if(!Array.isArray(rows) || rows.length === 0) return null
+
+        const row = rows[0]
+        if(!Array.isArray(row) || row.length < 2) return null
+
+        const fields = Redis.parseHash(row[1])
+        const job = await this.getJob(fields.jobId)
+
+        if(!job) return null
+
+        const replayed: WriteJob = {
+            ...job,
+            status: 'queued',
+            error: undefined,
+            workerId: undefined,
+            attempts: 0,
+            updatedAt: Date.now(),
+            nextAttemptAt: Date.now()
+        }
+
+        await this.enqueueWrite(replayed)
+        await this.client.send('XDEL', [Redis.DEAD_LETTER_STREAM, streamId])
+
+        return replayed
+    }
+
+    async getQueueStats(): Promise<QueueStats> {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        await this.ensureWriteGroup()
+
+        const [queuedRaw, deadRaw, pendingRaw] = await Promise.all([
+            this.client.send('XLEN', [Redis.WRITE_STREAM]),
+            this.client.send('XLEN', [Redis.DEAD_LETTER_STREAM]),
+            this.client.send('XPENDING', [Redis.WRITE_STREAM, Redis.WRITE_GROUP])
+        ])

+        const pending = Array.isArray(pendingRaw) ? Number(pendingRaw[0] ?? 0) : 0
+
+        return {
+            queued: Number(queuedRaw ?? 0),
+            pending,
+            deadLetters: Number(deadRaw ?? 0)
+        }
+    }
+
+    async acquireDocLock(collection: string, docId: _ttid, jobId: string, ttlSeconds: number = 60) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const result = await this.client.send('SET', [
+            Redis.lockKey(collection, docId),
+            jobId,
+            'NX',
+            'EX',
+            String(ttlSeconds)
+        ])
+
+        return result === 'OK'
+    }
+
+    async releaseDocLock(collection: string, docId: _ttid, jobId: string) {
+
+        if(!this.client.connected) throw new Error('Redis not connected!')
+
+        const key = Redis.lockKey(collection, docId)
+        const current = await this.client.send('GET', [key])
+        if(current === jobId) await this.client.send('DEL', [key])
+    }
+
     async *subscribe(collection: string) {
 
         if(!this.client.connected) throw new Error('Redis not connected!')
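
Taken together, these adapter methods are enough to drain the stream by hand. The following is a minimal consume-loop sketch, not the package's actual worker (that lives in `src/workers/write-worker.ts`, listed in this diff but not shown). `commitToS3` is a hypothetical stand-in for the real S3 flush, the status strings `'processing'`, `'done'`, and `'failed'` are assumed members of the `WriteJobStatus` union defined in `src/types/write-queue.ts`, and `new Redis()` is assumed to connect via `REDIS_URL`:

```typescript
import { Redis } from "./adapters/redis"
import type { WriteJob } from "./types/write-queue"

// Hypothetical stand-in for the worker's real S3 flush logic.
async function commitToS3(job: WriteJob): Promise<void> { /* ... */ }

const redis = new Redis()
const workerId = process.env.FYLO_WORKER_ID ?? `worker-${process.pid}`
const maxAttempts = Number(process.env.FYLO_WRITE_MAX_ATTEMPTS ?? 5)

// One pass: pull up to 10 jobs, blocking up to 1s for new entries.
for (const { streamId, job } of await redis.readWriteJobs(workerId, 10, 1000)) {
  try {
    await redis.setJobStatus(job.jobId, "processing", { workerId })
    await commitToS3(job)
    await redis.setJobStatus(job.jobId, "done")
    await redis.setDocStatus(job.collection, job.docId, "done", job.jobId)
    await redis.ackWriteJob(streamId) // remove from the group's pending entries
  } catch (err) {
    const attempts = job.attempts + 1
    if (attempts >= maxAttempts) {
      // XADDs to fylo:writes:dead and acks the original stream entry.
      await redis.deadLetterWriteJob(streamId, job, String(err))
    } else {
      await redis.setJobStatus(job.jobId, "failed", { attempts, error: String(err) })
      // Left unacked on purpose: claimPendingJobs() can reclaim it once idle.
    }
  }
}
```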
package/src/core/write-queue.ts
ADDED

@@ -0,0 +1,56 @@
+import type { WriteJob } from '../types/write-queue'
+
+export class WriteQueue {
+
+    static createInsertJob<T extends Record<string, any>>(collection: string, docId: _ttid, payload: T): WriteJob<T> {
+        const now = Date.now()
+
+        return {
+            jobId: Bun.randomUUIDv7(),
+            collection,
+            docId,
+            operation: 'insert',
+            payload,
+            status: 'queued',
+            attempts: 0,
+            createdAt: now,
+            updatedAt: now
+        }
+    }
+
+    static createUpdateJob<T extends Record<string, any>>(
+        collection: string,
+        docId: _ttid,
+        payload: { newDoc: Record<_ttid, Partial<T>>, oldDoc?: Record<_ttid, T> }
+    ): WriteJob<{ newDoc: Record<_ttid, Partial<T>>, oldDoc?: Record<_ttid, T> }> {
+        const now = Date.now()
+
+        return {
+            jobId: Bun.randomUUIDv7(),
+            collection,
+            docId,
+            operation: 'update',
+            payload,
+            status: 'queued',
+            attempts: 0,
+            createdAt: now,
+            updatedAt: now
+        }
+    }
+
+    static createDeleteJob(collection: string, docId: _ttid): WriteJob<{ _id: _ttid }> {
+        const now = Date.now()
+
+        return {
+            jobId: Bun.randomUUIDv7(),
+            collection,
+            docId,
+            operation: 'delete',
+            payload: { _id: docId },
+            status: 'queued',
+            attempts: 0,
+            createdAt: now,
+            updatedAt: now
+        }
+    }
+}
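
`WriteQueue` is a pure factory: it only shapes job records, while the Redis adapter persists them. A minimal sketch of how the two plug together. The import paths assume caller code sitting in `src/core/`, `new Redis()` is assumed to connect via `REDIS_URL`, and the `as unknown as _ttid` cast stands in for however Fylo actually mints document ids:

```typescript
import { Redis } from "../adapters/redis"
import { WriteQueue } from "./write-queue"

const redis = new Redis()

// Hypothetical doc id; real ids come from Fylo's TTID generation.
const docId = Bun.randomUUIDv7() as unknown as _ttid

// Shape the job record: jobId, timestamps, and 'queued' status are filled in.
const job = WriteQueue.createInsertJob("users", docId, { name: "John Doe" })

// Persist the job hash and doc status, then XADD a pointer onto fylo:writes.
await redis.enqueueWrite(job)

// The job hash stays readable by id until a worker processes the stream entry.
const snapshot = await redis.getJob(job.jobId)
console.log(snapshot?.status) // "queued"
```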