@delma/fylo 1.0.0 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/publish.yml +4 -14
- package/README.md +66 -7
- package/package.json +4 -2
- package/src/adapters/redis.ts +346 -0
- package/src/core/write-queue.ts +56 -0
- package/src/index.ts +292 -29
- package/src/types/fylo.d.ts +37 -3
- package/src/types/write-queue.ts +42 -0
- package/src/worker.ts +12 -0
- package/src/workers/write-worker.ts +119 -0
- package/tests/integration/queue.test.ts +99 -0
- package/tests/mocks/redis.ts +156 -0
package/src/index.ts
CHANGED
|
@@ -7,8 +7,12 @@ import Gen from "@vyckr/chex"
|
|
|
7
7
|
import { Walker } from './core/walker';
|
|
8
8
|
import { S3 } from "./adapters/s3"
|
|
9
9
|
import { Cipher } from "./adapters/cipher"
|
|
10
|
+
import { Redis } from "./adapters/redis"
|
|
11
|
+
import { WriteQueue } from './core/write-queue'
|
|
10
12
|
import './core/format'
|
|
11
13
|
import './core/extensions'
|
|
14
|
+
import type { QueueStats, QueuedWriteResult, WriteJob } from './types/write-queue'
|
|
15
|
+
import type { StreamJobEntry } from './types/write-queue'
|
|
12
16
|
|
|
13
17
|
export default class Fylo {
|
|
14
18
|
|
|
@@ -25,12 +29,25 @@ export default class Fylo {
|
|
|
25
29
|
/** Collections whose schema `$encrypted` config has already been loaded. */
|
|
26
30
|
private static readonly loadedEncryption: Set<string> = new Set()
|
|
27
31
|
|
|
32
|
+
private static _queueRedis: Redis | null = null
|
|
33
|
+
|
|
34
|
+
private static rollbackWarningShown = false
|
|
35
|
+
|
|
36
|
+
private static readonly MAX_WRITE_ATTEMPTS = Number(process.env.FYLO_WRITE_MAX_ATTEMPTS ?? 3)
|
|
37
|
+
|
|
38
|
+
private static readonly WRITE_RETRY_BASE_MS = Number(process.env.FYLO_WRITE_RETRY_BASE_MS ?? 10)
|
|
39
|
+
|
|
28
40
|
private dir: Dir;
|
|
29
41
|
|
|
30
42
|
constructor() {
|
|
31
43
|
this.dir = new Dir()
|
|
32
44
|
}
|
|
33
45
|
|
|
46
|
+
private static get queueRedis(): Redis {
|
|
47
|
+
if(!Fylo._queueRedis) Fylo._queueRedis = new Redis()
|
|
48
|
+
return Fylo._queueRedis
|
|
49
|
+
}
|
|
50
|
+
|
|
34
51
|
/**
|
|
35
52
|
* Executes a SQL query and returns the results.
|
|
36
53
|
* @param SQL The SQL query to execute.
|
|
@@ -129,11 +146,142 @@ export default class Fylo {
|
|
|
129
146
|
|
|
130
147
|
/**
|
|
131
148
|
* Rolls back all transactions in current instance
|
|
149
|
+
* @deprecated Prefer queued write recovery, retries, dead letters, or compensating writes.
|
|
132
150
|
*/
|
|
133
151
|
async rollback() {
|
|
152
|
+
if(!Fylo.rollbackWarningShown) {
|
|
153
|
+
Fylo.rollbackWarningShown = true
|
|
154
|
+
console.warn('[FYLO] rollback() is deprecated for queued-write flows. Prefer job recovery, dead letters, or compensating writes.')
|
|
155
|
+
}
|
|
134
156
|
await this.dir.executeRollback()
|
|
135
157
|
}
|
|
136
158
|
|
|
159
|
+
async getJobStatus(jobId: string) {
|
|
160
|
+
return await Fylo.queueRedis.getJob(jobId)
|
|
161
|
+
}
|
|
162
|
+
|
|
163
|
+
async getDocStatus(collection: string, docId: _ttid) {
|
|
164
|
+
return await Fylo.queueRedis.getDocStatus(collection, docId)
|
|
165
|
+
}
|
|
166
|
+
|
|
167
|
+
async getDeadLetters(count: number = 10) {
|
|
168
|
+
return await Fylo.queueRedis.readDeadLetters(count)
|
|
169
|
+
}
|
|
170
|
+
|
|
171
|
+
async getQueueStats(): Promise<QueueStats> {
|
|
172
|
+
return await Fylo.queueRedis.getQueueStats()
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
async replayDeadLetter(streamId: string) {
|
|
176
|
+
return await Fylo.queueRedis.replayDeadLetter(streamId)
|
|
177
|
+
}
|
|
178
|
+
|
|
179
|
+
private async waitForJob(jobId: string, timeoutMs: number = 5_000, intervalMs: number = 50) {
|
|
180
|
+
|
|
181
|
+
const start = Date.now()
|
|
182
|
+
|
|
183
|
+
do {
|
|
184
|
+
const job = await this.getJobStatus(jobId)
|
|
185
|
+
if(job && (job.status === 'committed' || job.status === 'failed')) return job
|
|
186
|
+
await Bun.sleep(intervalMs)
|
|
187
|
+
} while(Date.now() - start < timeoutMs)
|
|
188
|
+
|
|
189
|
+
throw new Error(`Timed out waiting for job ${jobId}`)
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
private async runQueuedJob<T>(
|
|
193
|
+
queued: QueuedWriteResult,
|
|
194
|
+
{
|
|
195
|
+
wait = true
|
|
196
|
+
}: {
|
|
197
|
+
wait?: boolean
|
|
198
|
+
timeoutMs?: number
|
|
199
|
+
} = {},
|
|
200
|
+
resolveValue?: () => Promise<T> | T
|
|
201
|
+
): Promise<T | QueuedWriteResult> {
|
|
202
|
+
|
|
203
|
+
if(!wait) return queued
|
|
204
|
+
|
|
205
|
+
const processed = await this.processQueuedWrites(1)
|
|
206
|
+
|
|
207
|
+
if(processed === 0) throw new Error(`No worker available to process job ${queued.jobId}`)
|
|
208
|
+
|
|
209
|
+
const job = await this.getJobStatus(queued.jobId)
|
|
210
|
+
|
|
211
|
+
if(job?.status === 'failed') {
|
|
212
|
+
throw new Error(job.error ?? `Queued job ${queued.jobId} failed`)
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
return resolveValue ? await resolveValue() : queued
|
|
216
|
+
}
|
|
217
|
+
|
|
218
|
+
async processQueuedWrites(count: number = 1, recover: boolean = false) {
|
|
219
|
+
const jobs = recover
|
|
220
|
+
? await Fylo.queueRedis.claimPendingJobs(Bun.randomUUIDv7(), 30_000, count)
|
|
221
|
+
: await Fylo.queueRedis.readWriteJobs(Bun.randomUUIDv7(), count)
|
|
222
|
+
|
|
223
|
+
let processed = 0
|
|
224
|
+
|
|
225
|
+
for(const job of jobs) {
|
|
226
|
+
if(await this.processQueuedJob(job)) processed++
|
|
227
|
+
}
|
|
228
|
+
|
|
229
|
+
return processed
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
private async processQueuedJob({ streamId, job }: StreamJobEntry) {
|
|
233
|
+
if(job.nextAttemptAt && job.nextAttemptAt > Date.now()) return false
|
|
234
|
+
|
|
235
|
+
const locked = await Fylo.queueRedis.acquireDocLock(job.collection, job.docId, job.jobId)
|
|
236
|
+
if(!locked) return false
|
|
237
|
+
|
|
238
|
+
try {
|
|
239
|
+
await Fylo.queueRedis.setJobStatus(job.jobId, 'processing', {
|
|
240
|
+
attempts: job.attempts + 1
|
|
241
|
+
})
|
|
242
|
+
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'processing', job.jobId)
|
|
243
|
+
|
|
244
|
+
await this.executeQueuedWrite(job)
|
|
245
|
+
|
|
246
|
+
await Fylo.queueRedis.setJobStatus(job.jobId, 'committed')
|
|
247
|
+
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'committed', job.jobId)
|
|
248
|
+
await Fylo.queueRedis.ackWriteJob(streamId)
|
|
249
|
+
return true
|
|
250
|
+
|
|
251
|
+
} catch(err) {
|
|
252
|
+
const attempts = job.attempts + 1
|
|
253
|
+
const message = err instanceof Error ? err.message : String(err)
|
|
254
|
+
|
|
255
|
+
if(attempts >= Fylo.MAX_WRITE_ATTEMPTS) {
|
|
256
|
+
await Fylo.queueRedis.setJobStatus(job.jobId, 'dead-letter', {
|
|
257
|
+
error: message,
|
|
258
|
+
attempts
|
|
259
|
+
})
|
|
260
|
+
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'dead-letter', job.jobId)
|
|
261
|
+
await Fylo.queueRedis.deadLetterWriteJob(streamId, {
|
|
262
|
+
...job,
|
|
263
|
+
attempts,
|
|
264
|
+
status: 'dead-letter',
|
|
265
|
+
error: message
|
|
266
|
+
}, message)
|
|
267
|
+
return false
|
|
268
|
+
}
|
|
269
|
+
|
|
270
|
+
const nextAttemptAt = Date.now() + (Fylo.WRITE_RETRY_BASE_MS * Math.max(1, 2 ** (attempts - 1)))
|
|
271
|
+
|
|
272
|
+
await Fylo.queueRedis.setJobStatus(job.jobId, 'failed', {
|
|
273
|
+
error: message,
|
|
274
|
+
attempts,
|
|
275
|
+
nextAttemptAt
|
|
276
|
+
})
|
|
277
|
+
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'failed', job.jobId)
|
|
278
|
+
return false
|
|
279
|
+
|
|
280
|
+
} finally {
|
|
281
|
+
await Fylo.queueRedis.releaseDocLock(job.collection, job.docId, job.jobId)
|
|
282
|
+
}
|
|
283
|
+
}
|
|
284
|
+
|
|
137
285
|
/**
|
|
138
286
|
* Imports data from a URL into a collection.
|
|
139
287
|
* @param collection The name of the collection.
|
|
@@ -401,6 +549,54 @@ export default class Fylo {
|
|
|
401
549
|
return ids
|
|
402
550
|
}
|
|
403
551
|
|
|
552
|
+
async queuePutData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T> | T): Promise<QueuedWriteResult> {
|
|
553
|
+
|
|
554
|
+
const { _id, doc } = await this.prepareInsert(collection, data)
|
|
555
|
+
const job = WriteQueue.createInsertJob(collection, _id, doc)
|
|
556
|
+
|
|
557
|
+
await Fylo.queueRedis.enqueueWrite(job)
|
|
558
|
+
|
|
559
|
+
return {
|
|
560
|
+
jobId: job.jobId,
|
|
561
|
+
docId: _id,
|
|
562
|
+
status: 'queued'
|
|
563
|
+
}
|
|
564
|
+
}
|
|
565
|
+
|
|
566
|
+
async queuePatchDoc<T extends Record<string, any>>(
|
|
567
|
+
collection: string,
|
|
568
|
+
newDoc: Record<_ttid, Partial<T>>,
|
|
569
|
+
oldDoc: Record<_ttid, T> = {}
|
|
570
|
+
): Promise<QueuedWriteResult> {
|
|
571
|
+
|
|
572
|
+
const docId = Object.keys(newDoc).shift() as _ttid
|
|
573
|
+
|
|
574
|
+
if(!docId) throw new Error("this document does not contain an TTID")
|
|
575
|
+
|
|
576
|
+
const job = WriteQueue.createUpdateJob(collection, docId, { newDoc, oldDoc })
|
|
577
|
+
|
|
578
|
+
await Fylo.queueRedis.enqueueWrite(job)
|
|
579
|
+
|
|
580
|
+
return {
|
|
581
|
+
jobId: job.jobId,
|
|
582
|
+
docId,
|
|
583
|
+
status: 'queued'
|
|
584
|
+
}
|
|
585
|
+
}
|
|
586
|
+
|
|
587
|
+
async queueDelDoc(collection: string, _id: _ttid): Promise<QueuedWriteResult> {
|
|
588
|
+
|
|
589
|
+
const job = WriteQueue.createDeleteJob(collection, _id)
|
|
590
|
+
|
|
591
|
+
await Fylo.queueRedis.enqueueWrite(job)
|
|
592
|
+
|
|
593
|
+
return {
|
|
594
|
+
jobId: job.jobId,
|
|
595
|
+
docId: _id,
|
|
596
|
+
status: 'queued'
|
|
597
|
+
}
|
|
598
|
+
}
|
|
599
|
+
|
|
404
600
|
/**
|
|
405
601
|
* Puts a document into a collection.
|
|
406
602
|
* @param collection The name of the collection.
|
|
@@ -423,18 +619,22 @@ export default class Fylo {
|
|
|
423
619
|
return _id
|
|
424
620
|
}
|
|
425
621
|
|
|
426
|
-
async
|
|
622
|
+
private async prepareInsert<T extends Record<string, any>>(collection: string, data: Record<_ttid, T> | T) {
|
|
427
623
|
|
|
428
624
|
await Fylo.loadEncryption(collection)
|
|
429
625
|
|
|
430
626
|
const currId = Object.keys(data).shift()!
|
|
431
|
-
|
|
432
627
|
const _id = TTID.isTTID(currId) ? await Fylo.uniqueTTID(currId) : await Fylo.uniqueTTID()
|
|
433
|
-
|
|
628
|
+
|
|
434
629
|
let doc = TTID.isTTID(currId) ? Object.values(data).shift() as T : data as T
|
|
435
630
|
|
|
436
631
|
if(Fylo.STRICT) doc = await Gen.validateData(collection, doc) as T
|
|
437
|
-
|
|
632
|
+
|
|
633
|
+
return { _id, doc }
|
|
634
|
+
}
|
|
635
|
+
|
|
636
|
+
private async executePutDataDirect<T extends Record<string, any>>(collection: string, _id: _ttid, doc: T) {
|
|
637
|
+
|
|
438
638
|
const keys = await Dir.extractKeys(collection, _id, doc)
|
|
439
639
|
|
|
440
640
|
const results = await Promise.allSettled(keys.data.map((item, i) => this.dir.putKeys(collection, { dataKey: item, indexKey: keys.indexes[i] })))
|
|
@@ -443,20 +643,13 @@ export default class Fylo {
|
|
|
443
643
|
await this.dir.executeRollback()
|
|
444
644
|
throw new Error(`Unable to write to ${collection} collection`)
|
|
445
645
|
}
|
|
446
|
-
|
|
646
|
+
|
|
447
647
|
if(Fylo.LOGGING) console.log(`Finished Writing ${_id}`)
|
|
448
648
|
|
|
449
649
|
return _id
|
|
450
650
|
}
|
|
451
651
|
|
|
452
|
-
|
|
453
|
-
* Patches a document in a collection.
|
|
454
|
-
* @param collection The name of the collection.
|
|
455
|
-
* @param newDoc The new document data.
|
|
456
|
-
* @param oldDoc The old document data.
|
|
457
|
-
* @returns The number of documents patched.
|
|
458
|
-
*/
|
|
459
|
-
async patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc: Record<_ttid, T> = {}) {
|
|
652
|
+
private async executePatchDocDirect<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc: Record<_ttid, T> = {}) {
|
|
460
653
|
|
|
461
654
|
await Fylo.loadEncryption(collection)
|
|
462
655
|
|
|
@@ -466,8 +659,6 @@ export default class Fylo {
|
|
|
466
659
|
|
|
467
660
|
if(!_id) throw new Error("this document does not contain an TTID")
|
|
468
661
|
|
|
469
|
-
// Fetch data keys once — needed for deletion and, when oldDoc is absent, reconstruction.
|
|
470
|
-
// Previously, delDoc re-fetched these internally, causing a redundant S3 list call per document.
|
|
471
662
|
const dataKeys = await Walker.getDocData(collection, _id)
|
|
472
663
|
|
|
473
664
|
if(dataKeys.length === 0) return _newId
|
|
@@ -485,7 +676,6 @@ export default class Fylo {
|
|
|
485
676
|
|
|
486
677
|
for(const field in newDoc[_id]) currData[field] = newDoc[_id][field]!
|
|
487
678
|
|
|
488
|
-
// Generate new TTID upfront so that delete and write can proceed in parallel.
|
|
489
679
|
_newId = await Fylo.uniqueTTID(_id)
|
|
490
680
|
|
|
491
681
|
let docToWrite: T = currData as T
|
|
@@ -509,6 +699,80 @@ export default class Fylo {
|
|
|
509
699
|
return _newId
|
|
510
700
|
}
|
|
511
701
|
|
|
702
|
+
private async executeDelDocDirect(collection: string, _id: _ttid) {
|
|
703
|
+
|
|
704
|
+
const keys = await Walker.getDocData(collection, _id)
|
|
705
|
+
|
|
706
|
+
const results = await Promise.allSettled(keys.map(key => this.dir.deleteKeys(collection, key)))
|
|
707
|
+
|
|
708
|
+
if(results.some(res => res.status === "rejected")) {
|
|
709
|
+
await this.dir.executeRollback()
|
|
710
|
+
throw new Error(`Unable to delete from ${collection} collection`)
|
|
711
|
+
}
|
|
712
|
+
|
|
713
|
+
if(Fylo.LOGGING) console.log(`Finished Deleting ${_id}`)
|
|
714
|
+
}
|
|
715
|
+
|
|
716
|
+
async putData<T extends Record<string, any>>(collection: string, data: T): Promise<_ttid>
|
|
717
|
+
async putData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T>): Promise<_ttid>
|
|
718
|
+
async putData<T extends Record<string, any>>(collection: string, data: T, options: { wait?: true, timeoutMs?: number }): Promise<_ttid>
|
|
719
|
+
async putData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T>, options: { wait?: true, timeoutMs?: number }): Promise<_ttid>
|
|
720
|
+
async putData<T extends Record<string, any>>(collection: string, data: T, options: { wait: false, timeoutMs?: number }): Promise<QueuedWriteResult>
|
|
721
|
+
async putData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T>, options: { wait: false, timeoutMs?: number }): Promise<QueuedWriteResult>
|
|
722
|
+
async putData<T extends Record<string, any>>(
|
|
723
|
+
collection: string,
|
|
724
|
+
data: Record<_ttid, T> | T,
|
|
725
|
+
options: { wait?: boolean, timeoutMs?: number } = {}
|
|
726
|
+
): Promise<_ttid | QueuedWriteResult> {
|
|
727
|
+
|
|
728
|
+
const queued = await this.queuePutData(collection, data)
|
|
729
|
+
|
|
730
|
+
return await this.runQueuedJob(queued, options, async () => queued.docId)
|
|
731
|
+
}
|
|
732
|
+
|
|
733
|
+
async executeQueuedWrite(job: WriteJob) {
|
|
734
|
+
|
|
735
|
+
switch(job.operation) {
|
|
736
|
+
case 'insert':
|
|
737
|
+
await Fylo.loadEncryption(job.collection)
|
|
738
|
+
return await this.executePutDataDirect(job.collection, job.docId, job.payload)
|
|
739
|
+
case 'update':
|
|
740
|
+
return await this.executePatchDocDirect(
|
|
741
|
+
job.collection,
|
|
742
|
+
job.payload.newDoc as Record<_ttid, Partial<Record<string, any>>>,
|
|
743
|
+
job.payload.oldDoc as Record<_ttid, Record<string, any>> | undefined
|
|
744
|
+
)
|
|
745
|
+
case 'delete':
|
|
746
|
+
return await this.executeDelDocDirect(job.collection, job.payload._id as _ttid)
|
|
747
|
+
default:
|
|
748
|
+
throw new Error(`Unsupported queued write operation: ${job.operation}`)
|
|
749
|
+
}
|
|
750
|
+
}
|
|
751
|
+
|
|
752
|
+
/**
|
|
753
|
+
* Patches a document in a collection.
|
|
754
|
+
* @param collection The name of the collection.
|
|
755
|
+
* @param newDoc The new document data.
|
|
756
|
+
* @param oldDoc The old document data.
|
|
757
|
+
* @returns The TTID of the patched document.
|
|
758
|
+
*/
|
|
759
|
+
async patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc?: Record<_ttid, T>): Promise<_ttid>
|
|
760
|
+
async patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc: Record<_ttid, T> | undefined, options: { wait?: true, timeoutMs?: number }): Promise<_ttid>
|
|
761
|
+
async patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc: Record<_ttid, T> | undefined, options: { wait: false, timeoutMs?: number }): Promise<QueuedWriteResult>
|
|
762
|
+
async patchDoc<T extends Record<string, any>>(
|
|
763
|
+
collection: string,
|
|
764
|
+
newDoc: Record<_ttid, Partial<T>>,
|
|
765
|
+
oldDoc: Record<_ttid, T> = {},
|
|
766
|
+
options: { wait?: boolean, timeoutMs?: number } = {}
|
|
767
|
+
): Promise<_ttid | QueuedWriteResult> {
|
|
768
|
+
const queued = await this.queuePatchDoc(collection, newDoc, oldDoc)
|
|
769
|
+
|
|
770
|
+
return await this.runQueuedJob(queued, options, async () => {
|
|
771
|
+
const job = await this.getJobStatus(queued.jobId)
|
|
772
|
+
return (job?.docId ?? queued.docId) as _ttid
|
|
773
|
+
})
|
|
774
|
+
}
|
|
775
|
+
|
|
512
776
|
/**
|
|
513
777
|
* Patches documents in a collection.
|
|
514
778
|
* @param collection The name of the collection.
|
|
@@ -583,18 +847,17 @@ export default class Fylo {
|
|
|
583
847
|
* @param _id The ID of the document.
|
|
584
848
|
* @returns The number of documents deleted.
|
|
585
849
|
*/
|
|
586
|
-
async delDoc(collection: string, _id: _ttid)
|
|
587
|
-
|
|
588
|
-
|
|
589
|
-
|
|
590
|
-
|
|
591
|
-
|
|
592
|
-
|
|
593
|
-
|
|
594
|
-
|
|
595
|
-
|
|
596
|
-
|
|
597
|
-
if(Fylo.LOGGING) console.log(`Finished Deleting ${_id}`)
|
|
850
|
+
async delDoc(collection: string, _id: _ttid): Promise<void>
|
|
851
|
+
async delDoc(collection: string, _id: _ttid, options: { wait?: true, timeoutMs?: number }): Promise<void>
|
|
852
|
+
async delDoc(collection: string, _id: _ttid, options: { wait: false, timeoutMs?: number }): Promise<QueuedWriteResult>
|
|
853
|
+
async delDoc(
|
|
854
|
+
collection: string,
|
|
855
|
+
_id: _ttid,
|
|
856
|
+
options: { wait?: boolean, timeoutMs?: number } = {}
|
|
857
|
+
): Promise<void | QueuedWriteResult> {
|
|
858
|
+
const queued = await this.queueDelDoc(collection, _id)
|
|
859
|
+
|
|
860
|
+
await this.runQueuedJob(queued, options, async () => undefined)
|
|
598
861
|
}
|
|
599
862
|
|
|
600
863
|
/**
|
|
@@ -1085,4 +1348,4 @@ export default class Fylo {
|
|
|
1085
1348
|
}
|
|
1086
1349
|
}
|
|
1087
1350
|
}
|
|
1088
|
-
}
|
|
1351
|
+
}
|
package/src/types/fylo.d.ts
CHANGED
|
@@ -10,6 +10,12 @@ interface _findDocs {
|
|
|
10
10
|
onDelete(): AsyncGenerator<_ttid, void, unknown>
|
|
11
11
|
}
|
|
12
12
|
|
|
13
|
+
interface _queuedWriteResult {
|
|
14
|
+
jobId: string
|
|
15
|
+
docId: _ttid
|
|
16
|
+
status: 'queued' | 'processing' | 'committed' | 'failed' | 'dead-letter'
|
|
17
|
+
}
|
|
18
|
+
|
|
13
19
|
interface ObjectConstructor {
|
|
14
20
|
appendGroup: (target: Record<string, any>, source: Record<string, any>) => Record<string, any>;
|
|
15
21
|
}
|
|
@@ -26,6 +32,7 @@ declare module "@vyckr/fylo" {
|
|
|
26
32
|
|
|
27
33
|
/**
|
|
28
34
|
* Rolls back all transactions in current instance
|
|
35
|
+
* @deprecated Prefer queued write recovery, dead letters, or compensating writes.
|
|
29
36
|
*/
|
|
30
37
|
rollback(): Promise<void>
|
|
31
38
|
|
|
@@ -80,13 +87,36 @@ declare module "@vyckr/fylo" {
|
|
|
80
87
|
*/
|
|
81
88
|
batchPutData<T extends Record<string, any>>(collection: string, batch: Array<T>): Promise<_ttid[]>
|
|
82
89
|
|
|
90
|
+
queuePutData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T> | T): Promise<_queuedWriteResult>
|
|
91
|
+
|
|
92
|
+
queuePatchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc?: Record<_ttid, T>): Promise<_queuedWriteResult>
|
|
93
|
+
|
|
94
|
+
queueDelDoc(collection: string, _id: _ttid): Promise<_queuedWriteResult>
|
|
95
|
+
|
|
96
|
+
getJobStatus(jobId: string): Promise<Record<string, any> | null>
|
|
97
|
+
|
|
98
|
+
getDocStatus(collection: string, docId: _ttid): Promise<Record<string, any> | null>
|
|
99
|
+
|
|
100
|
+
getDeadLetters(count?: number): Promise<Array<Record<string, any>>>
|
|
101
|
+
|
|
102
|
+
getQueueStats(): Promise<{ queued: number, pending: number, deadLetters: number }>
|
|
103
|
+
|
|
104
|
+
replayDeadLetter(streamId: string): Promise<Record<string, any> | null>
|
|
105
|
+
|
|
106
|
+
processQueuedWrites(count?: number, recover?: boolean): Promise<number>
|
|
107
|
+
|
|
83
108
|
/**
|
|
84
109
|
* Puts a document into a collection.
|
|
85
110
|
* @param collection The name of the collection.
|
|
86
111
|
* @param data The document to put.
|
|
87
112
|
* @returns The ID of the document.
|
|
88
113
|
*/
|
|
89
|
-
putData<T extends Record<string, any>>(collection: string, data:
|
|
114
|
+
putData<T extends Record<string, any>>(collection: string, data: T): Promise<_ttid>
|
|
115
|
+
putData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T>): Promise<_ttid>
|
|
116
|
+
putData<T extends Record<string, any>>(collection: string, data: T, options: { wait?: true, timeoutMs?: number }): Promise<_ttid>
|
|
117
|
+
putData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T>, options: { wait?: true, timeoutMs?: number }): Promise<_ttid>
|
|
118
|
+
putData<T extends Record<string, any>>(collection: string, data: T, options: { wait: false, timeoutMs?: number }): Promise<_queuedWriteResult>
|
|
119
|
+
putData<T extends Record<string, any>>(collection: string, data: Record<_ttid, T>, options: { wait: false, timeoutMs?: number }): Promise<_queuedWriteResult>
|
|
90
120
|
|
|
91
121
|
/**
|
|
92
122
|
* Patches a document in a collection.
|
|
@@ -95,7 +125,9 @@ declare module "@vyckr/fylo" {
|
|
|
95
125
|
* @param oldDoc The old document data.
|
|
96
126
|
* @returns The TTID of the patched document.
|
|
97
127
|
*/
|
|
98
|
-
patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc
|
|
128
|
+
patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc?: Record<_ttid, T>): Promise<_ttid>
|
|
129
|
+
patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc: Record<_ttid, T> | undefined, options: { wait?: true, timeoutMs?: number }): Promise<_ttid>
|
|
130
|
+
patchDoc<T extends Record<string, any>>(collection: string, newDoc: Record<_ttid, Partial<T>>, oldDoc: Record<_ttid, T> | undefined, options: { wait: false, timeoutMs?: number }): Promise<_queuedWriteResult>
|
|
99
131
|
|
|
100
132
|
/**
|
|
101
133
|
* Patches documents in a collection.
|
|
@@ -112,6 +144,8 @@ declare module "@vyckr/fylo" {
|
|
|
112
144
|
* @returns Resolves once the document has been deleted.
|
|
113
145
|
*/
|
|
114
146
|
delDoc(collection: string, _id: _ttid): Promise<void>
|
|
147
|
+
delDoc(collection: string, _id: _ttid, options: { wait?: true, timeoutMs?: number }): Promise<void>
|
|
148
|
+
delDoc(collection: string, _id: _ttid, options: { wait: false, timeoutMs?: number }): Promise<_queuedWriteResult>
|
|
115
149
|
|
|
116
150
|
/**
|
|
117
151
|
* Deletes documents from a collection.
|
|
@@ -136,4 +170,4 @@ declare module "@vyckr/fylo" {
|
|
|
136
170
|
*/
|
|
137
171
|
static findDocs<T extends Record<string, any>>(collection: string, query?: _storeQuery<T>): _findDocs
|
|
138
172
|
}
|
|
139
|
-
}
|
|
173
|
+
}
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
export type WriteJobOperation = 'insert' | 'update' | 'delete'
|
|
2
|
+
|
|
3
|
+
export type WriteJobStatus = 'queued' | 'processing' | 'committed' | 'failed' | 'dead-letter'
|
|
4
|
+
|
|
5
|
+
export interface WriteJob<T extends Record<string, any> = Record<string, any>> {
|
|
6
|
+
jobId: string
|
|
7
|
+
collection: string
|
|
8
|
+
docId: _ttid
|
|
9
|
+
operation: WriteJobOperation
|
|
10
|
+
payload: T
|
|
11
|
+
status: WriteJobStatus
|
|
12
|
+
attempts: number
|
|
13
|
+
createdAt: number
|
|
14
|
+
updatedAt: number
|
|
15
|
+
nextAttemptAt?: number
|
|
16
|
+
workerId?: string
|
|
17
|
+
error?: string
|
|
18
|
+
}
|
|
19
|
+
|
|
20
|
+
export interface QueuedWriteResult {
|
|
21
|
+
jobId: string
|
|
22
|
+
docId: _ttid
|
|
23
|
+
status: WriteJobStatus
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
export interface StreamJobEntry<T extends Record<string, any> = Record<string, any>> {
|
|
27
|
+
streamId: string
|
|
28
|
+
job: WriteJob<T>
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
export interface DeadLetterJob<T extends Record<string, any> = Record<string, any>> {
|
|
32
|
+
streamId: string
|
|
33
|
+
job: WriteJob<T>
|
|
34
|
+
reason?: string
|
|
35
|
+
failedAt: number
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
export interface QueueStats {
|
|
39
|
+
queued: number
|
|
40
|
+
pending: number
|
|
41
|
+
deadLetters: number
|
|
42
|
+
}
|
package/src/worker.ts
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
#!/usr/bin/env bun
|
|
2
|
+
import { WriteWorker } from './workers/write-worker'
|
|
3
|
+
|
|
4
|
+
const worker = new WriteWorker(process.env.FYLO_WORKER_ID)
|
|
5
|
+
|
|
6
|
+
await worker.run({
|
|
7
|
+
batchSize: process.env.FYLO_WORKER_BATCH_SIZE ? Number(process.env.FYLO_WORKER_BATCH_SIZE) : 1,
|
|
8
|
+
blockMs: process.env.FYLO_WORKER_BLOCK_MS ? Number(process.env.FYLO_WORKER_BLOCK_MS) : 1000,
|
|
9
|
+
recoverOnStart: process.env.FYLO_WORKER_RECOVER_ON_START !== 'false',
|
|
10
|
+
recoverIdleMs: process.env.FYLO_WORKER_RECOVER_IDLE_MS ? Number(process.env.FYLO_WORKER_RECOVER_IDLE_MS) : 30_000,
|
|
11
|
+
stopWhenIdle: process.env.FYLO_WORKER_STOP_WHEN_IDLE === 'true'
|
|
12
|
+
})
|
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
import Fylo from "../index"
|
|
2
|
+
import { Redis } from "../adapters/redis"
|
|
3
|
+
import type { StreamJobEntry, WriteJob } from "../types/write-queue"
|
|
4
|
+
|
|
5
|
+
export class WriteWorker {
|
|
6
|
+
|
|
7
|
+
private static readonly MAX_WRITE_ATTEMPTS = Number(process.env.FYLO_WRITE_MAX_ATTEMPTS ?? 3)
|
|
8
|
+
|
|
9
|
+
private static readonly WRITE_RETRY_BASE_MS = Number(process.env.FYLO_WRITE_RETRY_BASE_MS ?? 10)
|
|
10
|
+
|
|
11
|
+
private readonly fylo: Fylo
|
|
12
|
+
|
|
13
|
+
private readonly redis: Redis
|
|
14
|
+
|
|
15
|
+
readonly workerId: string
|
|
16
|
+
|
|
17
|
+
constructor(workerId: string = Bun.randomUUIDv7()) {
|
|
18
|
+
this.workerId = workerId
|
|
19
|
+
this.fylo = new Fylo()
|
|
20
|
+
this.redis = new Redis()
|
|
21
|
+
}
|
|
22
|
+
|
|
23
|
+
async recoverPending(minIdleMs: number = 30_000, count: number = 10) {
|
|
24
|
+
const jobs = await this.redis.claimPendingJobs(this.workerId, minIdleMs, count)
|
|
25
|
+
for(const job of jobs) await this.processJob(job)
|
|
26
|
+
return jobs.length
|
|
27
|
+
}
|
|
28
|
+
|
|
29
|
+
async processNext(count: number = 1, blockMs: number = 1000) {
|
|
30
|
+
const jobs = await this.redis.readWriteJobs(this.workerId, count, blockMs)
|
|
31
|
+
for(const job of jobs) await this.processJob(job)
|
|
32
|
+
return jobs.length
|
|
33
|
+
}
|
|
34
|
+
|
|
35
|
+
async processJob({ streamId, job }: StreamJobEntry) {
|
|
36
|
+
if(job.nextAttemptAt && job.nextAttemptAt > Date.now()) return false
|
|
37
|
+
|
|
38
|
+
const locked = await this.redis.acquireDocLock(job.collection, job.docId, job.jobId)
|
|
39
|
+
if(!locked) return false
|
|
40
|
+
|
|
41
|
+
try {
|
|
42
|
+
await this.redis.setJobStatus(job.jobId, 'processing', {
|
|
43
|
+
workerId: this.workerId,
|
|
44
|
+
attempts: job.attempts + 1
|
|
45
|
+
})
|
|
46
|
+
await this.redis.setDocStatus(job.collection, job.docId, 'processing', job.jobId)
|
|
47
|
+
|
|
48
|
+
await this.fylo.executeQueuedWrite(job)
|
|
49
|
+
|
|
50
|
+
await this.redis.setJobStatus(job.jobId, 'committed', { workerId: this.workerId })
|
|
51
|
+
await this.redis.setDocStatus(job.collection, job.docId, 'committed', job.jobId)
|
|
52
|
+
await this.redis.ackWriteJob(streamId)
|
|
53
|
+
|
|
54
|
+
return true
|
|
55
|
+
|
|
56
|
+
} catch(err) {
|
|
57
|
+
const attempts = job.attempts + 1
|
|
58
|
+
const message = err instanceof Error ? err.message : String(err)
|
|
59
|
+
|
|
60
|
+
if(attempts >= WriteWorker.MAX_WRITE_ATTEMPTS) {
|
|
61
|
+
await this.redis.setJobStatus(job.jobId, 'dead-letter', {
|
|
62
|
+
workerId: this.workerId,
|
|
63
|
+
attempts,
|
|
64
|
+
error: message
|
|
65
|
+
})
|
|
66
|
+
await this.redis.setDocStatus(job.collection, job.docId, 'dead-letter', job.jobId)
|
|
67
|
+
await this.redis.deadLetterWriteJob(streamId, {
|
|
68
|
+
...job,
|
|
69
|
+
attempts,
|
|
70
|
+
status: 'dead-letter',
|
|
71
|
+
workerId: this.workerId,
|
|
72
|
+
error: message
|
|
73
|
+
}, message)
|
|
74
|
+
return false
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
const nextAttemptAt = Date.now() + (WriteWorker.WRITE_RETRY_BASE_MS * Math.max(1, 2 ** (attempts - 1)))
|
|
78
|
+
|
|
79
|
+
await this.redis.setJobStatus(job.jobId, 'failed', {
|
|
80
|
+
workerId: this.workerId,
|
|
81
|
+
attempts,
|
|
82
|
+
error: message,
|
|
83
|
+
nextAttemptAt
|
|
84
|
+
})
|
|
85
|
+
await this.redis.setDocStatus(job.collection, job.docId, 'failed', job.jobId)
|
|
86
|
+
|
|
87
|
+
return false
|
|
88
|
+
|
|
89
|
+
} finally {
|
|
90
|
+
await this.redis.releaseDocLock(job.collection, job.docId, job.jobId)
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
async processQueuedInsert(job: WriteJob) {
|
|
95
|
+
return await this.fylo.executeQueuedWrite(job)
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
async run({
|
|
99
|
+
batchSize = 1,
|
|
100
|
+
blockMs = 1000,
|
|
101
|
+
recoverOnStart = true,
|
|
102
|
+
recoverIdleMs = 30_000,
|
|
103
|
+
stopWhenIdle = false
|
|
104
|
+
}: {
|
|
105
|
+
batchSize?: number
|
|
106
|
+
blockMs?: number
|
|
107
|
+
recoverOnStart?: boolean
|
|
108
|
+
recoverIdleMs?: number
|
|
109
|
+
stopWhenIdle?: boolean
|
|
110
|
+
} = {}) {
|
|
111
|
+
|
|
112
|
+
if(recoverOnStart) await this.recoverPending(recoverIdleMs, batchSize)
|
|
113
|
+
|
|
114
|
+
while(true) {
|
|
115
|
+
const processed = await this.processNext(batchSize, blockMs)
|
|
116
|
+
if(stopWhenIdle && processed === 0) break
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
}
|