@delma/fylo 2.0.0 → 2.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/README.md +185 -267
  2. package/package.json +2 -5
  3. package/src/core/directory.ts +22 -354
  4. package/src/engines/s3-files/documents.ts +65 -0
  5. package/src/engines/s3-files/filesystem.ts +172 -0
  6. package/src/engines/s3-files/query.ts +291 -0
  7. package/src/engines/s3-files/types.ts +42 -0
  8. package/src/engines/s3-files.ts +391 -510
  9. package/src/engines/types.ts +1 -1
  10. package/src/index.ts +142 -1237
  11. package/src/sync.ts +58 -0
  12. package/src/types/fylo.d.ts +66 -161
  13. package/src/types/node-runtime.d.ts +1 -0
  14. package/tests/collection/truncate.test.js +11 -10
  15. package/tests/helpers/root.js +7 -0
  16. package/tests/integration/create.test.js +9 -9
  17. package/tests/integration/delete.test.js +16 -14
  18. package/tests/integration/edge-cases.test.js +29 -25
  19. package/tests/integration/encryption.test.js +47 -30
  20. package/tests/integration/export.test.js +11 -11
  21. package/tests/integration/join-modes.test.js +16 -16
  22. package/tests/integration/nested.test.js +26 -24
  23. package/tests/integration/operators.test.js +43 -29
  24. package/tests/integration/read.test.js +25 -21
  25. package/tests/integration/rollback.test.js +21 -51
  26. package/tests/integration/s3-files.performance.test.js +75 -0
  27. package/tests/integration/s3-files.test.js +115 -18
  28. package/tests/integration/sync.test.js +154 -0
  29. package/tests/integration/update.test.js +24 -18
  30. package/src/adapters/redis.ts +0 -487
  31. package/src/adapters/s3.ts +0 -61
  32. package/src/core/walker.ts +0 -174
  33. package/src/core/write-queue.ts +0 -59
  34. package/src/migrate-cli.ts +0 -22
  35. package/src/migrate.ts +0 -74
  36. package/src/types/write-queue.ts +0 -42
  37. package/src/worker.ts +0 -18
  38. package/src/workers/write-worker.ts +0 -120
  39. package/tests/index.js +0 -14
  40. package/tests/integration/migration.test.js +0 -38
  41. package/tests/integration/queue.test.js +0 -83
  42. package/tests/mocks/redis.js +0 -123
  43. package/tests/mocks/s3.js +0 -80
@@ -1,174 +0,0 @@
1
- import { S3 } from '../adapters/s3'
2
- import TTID from '@delma/ttid'
3
- import { Redis } from '../adapters/redis'
4
-
5
- export class Walker {
6
- private static readonly MAX_KEYS = 1000
7
-
8
- private static _redis: Redis | null = null
9
-
10
- private static get redis(): Redis {
11
- if (!Walker._redis) Walker._redis = new Redis()
12
- return Walker._redis
13
- }
14
-
15
- private static async *searchS3(
16
- collection: string,
17
- prefix: string,
18
- pattern?: string
19
- ): AsyncGenerator<
20
- { _id: _ttid; data: string[] } | void,
21
- void,
22
- { count: number; limit?: number }
23
- > {
24
- const uniqueIds = new Set<string>()
25
-
26
- let token: string | undefined
27
-
28
- let filter = yield
29
-
30
- let limit = filter ? filter.limit : this.MAX_KEYS
31
-
32
- do {
33
- const res = await S3.list(collection, {
34
- prefix,
35
- maxKeys: pattern ? limit : undefined,
36
- continuationToken: token
37
- })
38
-
39
- if (res.contents === undefined) break
40
-
41
- const keys = res.contents.map((item) => item.key!)
42
-
43
- if (pattern) {
44
- for (const key of keys) {
45
- const segements = key.split('/')
46
-
47
- const _id = segements.pop()! as _ttid
48
-
49
- if (
50
- TTID.isTTID(_id) &&
51
- !uniqueIds.has(_id) &&
52
- pattern.length <= 1024 &&
53
- new Bun.Glob(pattern).match(key)
54
- ) {
55
- filter = yield { _id, data: await this.getDocData(collection, _id) }
56
-
57
- limit = filter.limit ? filter.limit : this.MAX_KEYS
58
-
59
- uniqueIds.add(_id)
60
-
61
- if (filter.count === limit) break
62
- }
63
- }
64
- } else {
65
- const _id = prefix.split('/').pop()! as _ttid
66
-
67
- yield { _id, data: keys }
68
-
69
- break
70
- }
71
-
72
- token = res.nextContinuationToken
73
- } while (token !== undefined)
74
- }
75
-
76
- static async *search(
77
- collection: string,
78
- pattern: string,
79
- { listen = false, skip = false }: { listen: boolean; skip: boolean },
80
- action: 'insert' | 'delete' = 'insert'
81
- ): AsyncGenerator<
82
- { _id: _ttid; data: string[] } | void,
83
- void,
84
- { count: number; limit?: number }
85
- > {
86
- if (!skip) {
87
- const segments = pattern.split('/')
88
- const idx = segments.findIndex((seg) => seg.includes('*'))
89
- const prefix = segments.slice(0, idx).join('/')
90
-
91
- yield* this.searchS3(collection, prefix, pattern)
92
- }
93
-
94
- const eventIds = new Set<string>()
95
-
96
- if (listen)
97
- for await (const event of this.listen(collection, pattern)) {
98
- if (event.action !== action && eventIds.has(event.id)) {
99
- eventIds.delete(event.id)
100
- } else if (event.action === action && !eventIds.has(event.id)) {
101
- eventIds.add(event.id)
102
- yield { _id: event.id, data: event.data }
103
- }
104
- }
105
- }
106
-
107
- static async getDocData(collection: string, _id: _ttid) {
108
- const prefix = _id.split('-')[0]
109
-
110
- const data: string[] = []
111
-
112
- let finished = false
113
-
114
- const iter = this.searchS3(collection, prefix)
115
-
116
- do {
117
- const { value, done } = await iter.next()
118
-
119
- if (done) {
120
- finished = true
121
- break
122
- }
123
-
124
- if (value) {
125
- for (const key of value.data) {
126
- if (key.startsWith(_id + '/')) data.push(key)
127
- }
128
- finished = true
129
- break
130
- }
131
- } while (!finished)
132
-
133
- return data
134
- }
135
-
136
- private static async *processPattern(collection: string, pattern: string) {
137
- const stackIds = new Set<string>()
138
-
139
- for await (const { action, keyId } of Walker.redis.subscribe(collection)) {
140
- if (
141
- action === 'insert' &&
142
- !TTID.isTTID(keyId) &&
143
- pattern.length <= 1024 &&
144
- new Bun.Glob(pattern).match(keyId)
145
- ) {
146
- const _id = keyId.split('/').pop()! as _ttid
147
-
148
- if (!stackIds.has(_id)) {
149
- stackIds.add(_id)
150
-
151
- yield {
152
- id: _id,
153
- action: 'insert',
154
- data: await this.getDocData(collection, _id)
155
- }
156
- }
157
- } else if (action === 'delete' && TTID.isTTID(keyId)) {
158
- yield { id: keyId as _ttid, action: 'delete', data: [] }
159
- } else if (TTID.isTTID(keyId) && stackIds.has(keyId)) {
160
- stackIds.delete(keyId)
161
- }
162
- }
163
- }
164
-
165
- static async *listen(collection: string, pattern: string | string[]) {
166
- if (Array.isArray(pattern)) {
167
- for (const p of pattern) {
168
- for await (const event of this.processPattern(collection, p)) yield event
169
- }
170
- } else {
171
- for await (const event of this.processPattern(collection, pattern)) yield event
172
- }
173
- }
174
- }
@@ -1,59 +0,0 @@
1
- import type { WriteJob } from '../types/write-queue'
2
-
3
- export class WriteQueue {
4
- static createInsertJob<T extends Record<string, any>>(
5
- collection: string,
6
- docId: _ttid,
7
- payload: T
8
- ): WriteJob<T> {
9
- const now = Date.now()
10
-
11
- return {
12
- jobId: Bun.randomUUIDv7(),
13
- collection,
14
- docId,
15
- operation: 'insert',
16
- payload,
17
- status: 'queued',
18
- attempts: 0,
19
- createdAt: now,
20
- updatedAt: now
21
- }
22
- }
23
-
24
- static createUpdateJob<T extends Record<string, any>>(
25
- collection: string,
26
- docId: _ttid,
27
- payload: { newDoc: Record<_ttid, Partial<T>>; oldDoc?: Record<_ttid, T> }
28
- ): WriteJob<{ newDoc: Record<_ttid, Partial<T>>; oldDoc?: Record<_ttid, T> }> {
29
- const now = Date.now()
30
-
31
- return {
32
- jobId: Bun.randomUUIDv7(),
33
- collection,
34
- docId,
35
- operation: 'update',
36
- payload,
37
- status: 'queued',
38
- attempts: 0,
39
- createdAt: now,
40
- updatedAt: now
41
- }
42
- }
43
-
44
- static createDeleteJob(collection: string, docId: _ttid): WriteJob<{ _id: _ttid }> {
45
- const now = Date.now()
46
-
47
- return {
48
- jobId: Bun.randomUUIDv7(),
49
- collection,
50
- docId,
51
- operation: 'delete',
52
- payload: { _id: docId },
53
- status: 'queued',
54
- attempts: 0,
55
- createdAt: now,
56
- updatedAt: now
57
- }
58
- }
59
- }
@@ -1,22 +0,0 @@
1
- #!/usr/bin/env bun
2
- import { migrateLegacyS3ToS3Files } from './migrate'
3
-
4
- const [, , ...args] = process.argv
5
-
6
- const collections = args.filter((arg) => !arg.startsWith('--'))
7
- const recreateCollections = !args.includes('--keep-existing')
8
- const verify = !args.includes('--no-verify')
9
-
10
- if (collections.length === 0) {
11
- throw new Error(
12
- 'Usage: fylo.migrate <collection> [collection...] [--keep-existing] [--no-verify]'
13
- )
14
- }
15
-
16
- const summary = await migrateLegacyS3ToS3Files({
17
- collections,
18
- recreateCollections,
19
- verify
20
- })
21
-
22
- console.log(JSON.stringify(summary, null, 2))
package/src/migrate.ts DELETED
@@ -1,74 +0,0 @@
1
- import Fylo from './index'
2
- import { S3FilesEngine } from './engines/s3-files'
3
-
4
- type MigrateOptions = {
5
- collections: string[]
6
- s3FilesRoot?: string
7
- recreateCollections?: boolean
8
- verify?: boolean
9
- }
10
-
11
- function normalize<T>(value: T): T {
12
- if (Array.isArray(value)) return value.map((item) => normalize(item)).sort() as T
13
- if (value && typeof value === 'object') {
14
- return Object.keys(value as Record<string, unknown>)
15
- .sort()
16
- .reduce(
17
- (acc, key) => {
18
- acc[key] = normalize((value as Record<string, unknown>)[key])
19
- return acc
20
- },
21
- {} as Record<string, unknown>
22
- ) as T
23
- }
24
- return value
25
- }
26
-
27
- export async function migrateLegacyS3ToS3Files({
28
- collections,
29
- s3FilesRoot = process.env.FYLO_S3FILES_ROOT,
30
- recreateCollections = true,
31
- verify = true
32
- }: MigrateOptions) {
33
- if (!s3FilesRoot) throw new Error('s3FilesRoot is required')
34
-
35
- const source = new Fylo({ engine: 'legacy-s3' })
36
- const target = new S3FilesEngine(s3FilesRoot)
37
-
38
- const summary: Record<string, { migrated: number; verified: boolean }> = {}
39
-
40
- for (const collection of collections) {
41
- if (recreateCollections) {
42
- await target.dropCollection(collection)
43
- await target.createCollection(collection)
44
- } else if (!(await target.hasCollection(collection))) {
45
- await target.createCollection(collection)
46
- }
47
-
48
- const docs = (await source.executeSQL<Record<string, any>>(
49
- `SELECT * FROM ${collection}`
50
- )) as Record<_ttid, Record<string, any>>
51
-
52
- for (const [docId, doc] of Object.entries(docs) as Array<[_ttid, Record<string, any>]>) {
53
- await target.putDocument(collection, docId, doc)
54
- }
55
-
56
- let verified = false
57
-
58
- if (verify) {
59
- const targetFylo = new Fylo({ engine: 's3-files', s3FilesRoot })
60
- const migratedDocs = (await targetFylo.executeSQL<Record<string, any>>(
61
- `SELECT * FROM ${collection}`
62
- )) as Record<_ttid, Record<string, any>>
63
- verified = JSON.stringify(normalize(docs)) === JSON.stringify(normalize(migratedDocs))
64
- if (!verified) throw new Error(`Verification failed for ${collection}`)
65
- }
66
-
67
- summary[collection] = {
68
- migrated: Object.keys(docs).length,
69
- verified
70
- }
71
- }
72
-
73
- return summary
74
- }
@@ -1,42 +0,0 @@
1
- export type WriteJobOperation = 'insert' | 'update' | 'delete'
2
-
3
- export type WriteJobStatus = 'queued' | 'processing' | 'committed' | 'failed' | 'dead-letter'
4
-
5
- export interface WriteJob<T extends Record<string, any> = Record<string, any>> {
6
- jobId: string
7
- collection: string
8
- docId: _ttid
9
- operation: WriteJobOperation
10
- payload: T
11
- status: WriteJobStatus
12
- attempts: number
13
- createdAt: number
14
- updatedAt: number
15
- nextAttemptAt?: number
16
- workerId?: string
17
- error?: string
18
- }
19
-
20
- export interface QueuedWriteResult {
21
- jobId: string
22
- docId: _ttid
23
- status: WriteJobStatus
24
- }
25
-
26
- export interface StreamJobEntry<T extends Record<string, any> = Record<string, any>> {
27
- streamId: string
28
- job: WriteJob<T>
29
- }
30
-
31
- export interface DeadLetterJob<T extends Record<string, any> = Record<string, any>> {
32
- streamId: string
33
- job: WriteJob<T>
34
- reason?: string
35
- failedAt: number
36
- }
37
-
38
- export interface QueueStats {
39
- queued: number
40
- pending: number
41
- deadLetters: number
42
- }
package/src/worker.ts DELETED
@@ -1,18 +0,0 @@
1
- #!/usr/bin/env bun
2
- import { WriteWorker } from './workers/write-worker'
3
-
4
- if (process.env.FYLO_STORAGE_ENGINE === 's3-files') {
5
- throw new Error('fylo.worker is not supported in s3-files engine')
6
- }
7
-
8
- const worker = new WriteWorker(process.env.FYLO_WORKER_ID)
9
-
10
- await worker.run({
11
- batchSize: process.env.FYLO_WORKER_BATCH_SIZE ? Number(process.env.FYLO_WORKER_BATCH_SIZE) : 1,
12
- blockMs: process.env.FYLO_WORKER_BLOCK_MS ? Number(process.env.FYLO_WORKER_BLOCK_MS) : 1000,
13
- recoverOnStart: process.env.FYLO_WORKER_RECOVER_ON_START !== 'false',
14
- recoverIdleMs: process.env.FYLO_WORKER_RECOVER_IDLE_MS
15
- ? Number(process.env.FYLO_WORKER_RECOVER_IDLE_MS)
16
- : 30_000,
17
- stopWhenIdle: process.env.FYLO_WORKER_STOP_WHEN_IDLE === 'true'
18
- })
@@ -1,120 +0,0 @@
1
- import Fylo from '../index'
2
- import { Redis } from '../adapters/redis'
3
- import type { StreamJobEntry, WriteJob } from '../types/write-queue'
4
-
5
- export class WriteWorker {
6
- private static readonly MAX_WRITE_ATTEMPTS = Number(process.env.FYLO_WRITE_MAX_ATTEMPTS ?? 3)
7
-
8
- private static readonly WRITE_RETRY_BASE_MS = Number(process.env.FYLO_WRITE_RETRY_BASE_MS ?? 10)
9
-
10
- private readonly fylo: Fylo
11
-
12
- private readonly redis: Redis
13
-
14
- readonly workerId: string
15
-
16
- constructor(workerId: string = Bun.randomUUIDv7()) {
17
- this.workerId = workerId
18
- this.fylo = new Fylo()
19
- this.redis = new Redis()
20
- }
21
-
22
- async recoverPending(minIdleMs: number = 30_000, count: number = 10) {
23
- const jobs = await this.redis.claimPendingJobs(this.workerId, minIdleMs, count)
24
- for (const job of jobs) await this.processJob(job)
25
- return jobs.length
26
- }
27
-
28
- async processNext(count: number = 1, blockMs: number = 1000) {
29
- const jobs = await this.redis.readWriteJobs(this.workerId, count, blockMs)
30
- for (const job of jobs) await this.processJob(job)
31
- return jobs.length
32
- }
33
-
34
- async processJob({ streamId, job }: StreamJobEntry) {
35
- if (job.nextAttemptAt && job.nextAttemptAt > Date.now()) return false
36
-
37
- const locked = await this.redis.acquireDocLock(job.collection, job.docId, job.jobId)
38
- if (!locked) return false
39
-
40
- try {
41
- await this.redis.setJobStatus(job.jobId, 'processing', {
42
- workerId: this.workerId,
43
- attempts: job.attempts + 1
44
- })
45
- await this.redis.setDocStatus(job.collection, job.docId, 'processing', job.jobId)
46
-
47
- await this.fylo.executeQueuedWrite(job)
48
-
49
- await this.redis.setJobStatus(job.jobId, 'committed', { workerId: this.workerId })
50
- await this.redis.setDocStatus(job.collection, job.docId, 'committed', job.jobId)
51
- await this.redis.ackWriteJob(streamId)
52
-
53
- return true
54
- } catch (err) {
55
- const attempts = job.attempts + 1
56
- const message = err instanceof Error ? err.message : String(err)
57
-
58
- if (attempts >= WriteWorker.MAX_WRITE_ATTEMPTS) {
59
- await this.redis.setJobStatus(job.jobId, 'dead-letter', {
60
- workerId: this.workerId,
61
- attempts,
62
- error: message
63
- })
64
- await this.redis.setDocStatus(job.collection, job.docId, 'dead-letter', job.jobId)
65
- await this.redis.deadLetterWriteJob(
66
- streamId,
67
- {
68
- ...job,
69
- attempts,
70
- status: 'dead-letter',
71
- workerId: this.workerId,
72
- error: message
73
- },
74
- message
75
- )
76
- return false
77
- }
78
-
79
- const nextAttemptAt =
80
- Date.now() + WriteWorker.WRITE_RETRY_BASE_MS * Math.max(1, 2 ** (attempts - 1))
81
-
82
- await this.redis.setJobStatus(job.jobId, 'failed', {
83
- workerId: this.workerId,
84
- attempts,
85
- error: message,
86
- nextAttemptAt
87
- })
88
- await this.redis.setDocStatus(job.collection, job.docId, 'failed', job.jobId)
89
-
90
- return false
91
- } finally {
92
- await this.redis.releaseDocLock(job.collection, job.docId, job.jobId)
93
- }
94
- }
95
-
96
- async processQueuedInsert(job: WriteJob) {
97
- return await this.fylo.executeQueuedWrite(job)
98
- }
99
-
100
- async run({
101
- batchSize = 1,
102
- blockMs = 1000,
103
- recoverOnStart = true,
104
- recoverIdleMs = 30_000,
105
- stopWhenIdle = false
106
- }: {
107
- batchSize?: number
108
- blockMs?: number
109
- recoverOnStart?: boolean
110
- recoverIdleMs?: number
111
- stopWhenIdle?: boolean
112
- } = {}) {
113
- if (recoverOnStart) await this.recoverPending(recoverIdleMs, batchSize)
114
-
115
- while (true) {
116
- const processed = await this.processNext(batchSize, blockMs)
117
- if (stopWhenIdle && processed === 0) break
118
- }
119
- }
120
- }
package/tests/index.js DELETED
@@ -1,14 +0,0 @@
1
- import { Redis } from '../src/adapters/redis'
2
- import ttid from '@delma/ttid'
3
- const redisPub = new Redis()
4
- const redisSub = new Redis()
5
- setTimeout(async () => {
6
- await redisPub.publish('bun', 'insert', ttid.generate())
7
- }, 2000)
8
- setTimeout(async () => {
9
- await redisPub.publish('bun', 'insert', ttid.generate())
10
- }, 3000)
11
- await Bun.sleep(1000)
12
- for await (const data of redisSub.subscribe('bun')) {
13
- console.log('Received:', data)
14
- }
@@ -1,38 +0,0 @@
1
- import { afterAll, beforeAll, describe, expect, mock, test } from 'bun:test'
2
- import { mkdtemp, rm } from 'node:fs/promises'
3
- import os from 'node:os'
4
- import path from 'node:path'
5
- import S3Mock from '../mocks/s3'
6
- import RedisMock from '../mocks/redis'
7
- mock.module('../../src/adapters/s3', () => ({ S3: S3Mock }))
8
- mock.module('../../src/adapters/redis', () => ({ Redis: RedisMock }))
9
- const { default: Fylo, migrateLegacyS3ToS3Files } = await import('../../src')
10
- const root = await mkdtemp(path.join(os.tmpdir(), 'fylo-migrate-'))
11
- const legacyFylo = new Fylo({ engine: 'legacy-s3' })
12
- const s3FilesFylo = new Fylo({ engine: 's3-files', s3FilesRoot: root })
13
- const COLLECTION = 'migration-posts'
14
- describe('legacy-s3 to s3-files migration', () => {
15
- beforeAll(async () => {
16
- await Fylo.createCollection(COLLECTION)
17
- await legacyFylo.putData(COLLECTION, { id: 1, title: 'Alpha' })
18
- await legacyFylo.putData(COLLECTION, { id: 2, title: 'Beta' })
19
- })
20
- afterAll(async () => {
21
- await rm(root, { recursive: true, force: true })
22
- })
23
- test('migrates legacy data and verifies parity', async () => {
24
- const summary = await migrateLegacyS3ToS3Files({
25
- collections: [COLLECTION],
26
- s3FilesRoot: root,
27
- verify: true
28
- })
29
- expect(summary[COLLECTION].migrated).toBe(2)
30
- expect(summary[COLLECTION].verified).toBe(true)
31
- const migrated = await s3FilesFylo.executeSQL(`SELECT * FROM ${COLLECTION}`)
32
- expect(
33
- Object.values(migrated)
34
- .map((item) => item.title)
35
- .sort()
36
- ).toEqual(['Alpha', 'Beta'])
37
- })
38
- })
@@ -1,83 +0,0 @@
1
- import { test, expect, describe, beforeAll, afterAll, mock } from 'bun:test'
2
- import Fylo from '../../src'
3
- import S3Mock from '../mocks/s3'
4
- import RedisMock from '../mocks/redis'
5
- const POSTS = 'queued-post'
6
- mock.module('../../src/adapters/s3', () => ({ S3: S3Mock }))
7
- mock.module('../../src/adapters/redis', () => ({ Redis: RedisMock }))
8
- const fylo = new Fylo()
9
- beforeAll(async () => {
10
- await Fylo.createCollection(POSTS)
11
- })
12
- afterAll(async () => {
13
- await Fylo.dropCollection(POSTS)
14
- })
15
- describe('queue writes', () => {
16
- test('queuePutData enqueues and worker commits insert', async () => {
17
- const queued = await fylo.queuePutData(POSTS, {
18
- title: 'Queued Title',
19
- body: 'Queued Body'
20
- })
21
- const queuedStatus = await fylo.getJobStatus(queued.jobId)
22
- expect(queuedStatus?.status).toBe('queued')
23
- const processed = await fylo.processQueuedWrites(1)
24
- expect(processed).toBe(1)
25
- const doc = await Fylo.getDoc(POSTS, queued.docId, false).once()
26
- expect(Object.keys(doc).length).toBe(1)
27
- })
28
- test('putData can return immediately when wait is false', async () => {
29
- const queued = await fylo.putData(
30
- POSTS,
31
- {
32
- title: 'Async Title',
33
- body: 'Async Body'
34
- },
35
- { wait: false }
36
- )
37
- expect(typeof queued).toBe('object')
38
- expect('jobId' in queued).toBe(true)
39
- const before = await Fylo.getDoc(POSTS, queued.docId, false).once()
40
- expect(Object.keys(before).length).toBe(0)
41
- const processed = await fylo.processQueuedWrites(1)
42
- expect(processed).toBe(1)
43
- const stats = await fylo.getQueueStats()
44
- expect(stats.deadLetters).toBe(0)
45
- const after = await Fylo.getDoc(POSTS, queued.docId, false).once()
46
- expect(Object.keys(after).length).toBe(1)
47
- })
48
- test('failed jobs can be recovered and eventually dead-lettered', async () => {
49
- const originalExecute = fylo.executeQueuedWrite.bind(fylo)
50
- fylo.executeQueuedWrite = async () => {
51
- throw new Error('simulated write failure')
52
- }
53
- try {
54
- const queued = await fylo.putData(
55
- POSTS,
56
- {
57
- title: 'Broken Title',
58
- body: 'Broken Body'
59
- },
60
- { wait: false }
61
- )
62
- expect(await fylo.processQueuedWrites(1)).toBe(0)
63
- expect((await fylo.getJobStatus(queued.jobId))?.status).toBe('failed')
64
- await Bun.sleep(15)
65
- expect(await fylo.processQueuedWrites(1, true)).toBe(0)
66
- expect((await fylo.getJobStatus(queued.jobId))?.status).toBe('failed')
67
- await Bun.sleep(25)
68
- expect(await fylo.processQueuedWrites(1, true)).toBe(0)
69
- expect((await fylo.getJobStatus(queued.jobId))?.status).toBe('dead-letter')
70
- const deadLetters = await fylo.getDeadLetters()
71
- expect((await fylo.getQueueStats()).deadLetters).toBeGreaterThan(0)
72
- expect(deadLetters.some((item) => item.job.jobId === queued.jobId)).toBe(true)
73
- fylo.executeQueuedWrite = originalExecute
74
- const replayed = await fylo.replayDeadLetter(deadLetters[0].streamId)
75
- expect(replayed?.jobId).toBe(queued.jobId)
76
- expect((await fylo.getQueueStats()).deadLetters).toBe(0)
77
- expect(await fylo.processQueuedWrites(1)).toBe(1)
78
- expect((await fylo.getJobStatus(queued.jobId))?.status).toBe('committed')
79
- } finally {
80
- fylo.executeQueuedWrite = originalExecute
81
- }
82
- })
83
- })