@delma/fylo 1.0.1 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,113 +1,3 @@
- # FYLO Project Guidelines
+ Follow the shared workspace instructions in `../instructions.md` and shared context in `../memory.md`.
 
- ## Overview
-
- FYLO (`@vyckr/fylo`) is an S3-backed NoSQL document store with SQL parsing, Redis pub/sub for real-time events, and a CLI. Documents are stored as S3 key paths — not as file contents — with dual key layouts for data access and indexed queries.
-
- **Assume a serverless deployment model** (e.g., AWS Lambda, Cloudflare Workers). This means:
- - No persistent in-memory state across invocations — every request starts cold
- - Distributed coordination (e.g., TTID uniqueness) must use external stores like Redis, not in-process caches
- - Avoid long-lived connections, background threads, or singleton patterns that assume process longevity
- - Keep cold-start overhead minimal — lazy initialization over eager setup
-
- ## Architecture
-
- ### Key Storage Format
-
- - **Data keys**: `{ttid}/{field}/{value}` — keyed by document ID for full-doc retrieval
- - **Index keys**: `{field}/{value}/{ttid}` — keyed by field for query lookups
- - Nested objects flatten to path segments: `address/city/Toronto`
- - Forward slashes in values are escaped with an ASCII substitute
-
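To make the removed layout description concrete: here is a minimal sketch of how one document could fan out into both key sets. The `flatten`/`keysFor` helpers and the `|` escape character are illustrative assumptions, not the package's actual internals.

```typescript
// Illustrative only — not Fylo's internal code. Assumes '|' as the
// slash substitute; the guidelines only say "an ASCII substitute".
type Doc = Record<string, unknown>

function flatten(doc: Doc, prefix: string[] = []): string[][] {
    const paths: string[][] = []
    for (const [field, value] of Object.entries(doc)) {
        if (value !== null && typeof value === "object") {
            // nested objects flatten to path segments
            paths.push(...flatten(value as Doc, [...prefix, field]))
        } else {
            paths.push([...prefix, field, String(value).replaceAll("/", "|")])
        }
    }
    return paths
}

function keysFor(ttid: string, doc: Doc) {
    const paths = flatten(doc)
    return {
        data: paths.map(p => [ttid, ...p].join("/")),  // {ttid}/{field}/{value}
        index: paths.map(p => [...p, ttid].join("/"))  // {field}/{value}/{ttid}
    }
}

// keysFor("01HXYZ", { name: "John", address: { city: "Toronto" } })
// data:  ["01HXYZ/name/John", "01HXYZ/address/city/Toronto"]
// index: ["name/John/01HXYZ", "address/city/Toronto/01HXYZ"]
```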
- ### Core Modules
-
- | Module | Responsibility |
- |--------|---------------|
- | `src/index.ts` | Main `Fylo` class — CRUD, SQL execution, joins, bulk ops |
- | `src/core/parser.ts` | SQL lexer/parser — tokenizes SQL into query objects |
- | `src/core/query.ts` | Converts `$ops` into glob patterns for S3 key matching |
- | `src/core/walker.ts` | S3 key traversal, document data retrieval, Redis event streaming |
- | `src/core/directory.ts` | Key extraction, reconstruction, rollback tracking |
- | `src/core/format.ts` | Console formatting for query output |
- | `src/adapters/s3.ts` | S3 adapter (Bun S3Client) |
- | `src/adapters/redis.ts` | Redis adapter (Bun RedisClient) |
- | `src/cli/index.ts` | CLI entry point (`fylo.query`) |
-
- ### Folder Structure
-
- ```
- src/
-   index.ts       # Public API — main Fylo class
-   adapters/      # I/O boundary abstractions (S3, Redis)
-   core/          # Internal domain logic (parser, query, walker, directory)
-   cli/           # CLI entry point
-   types/         # Type declarations (.d.ts only — separate from implementation)
- tests/
-   data.ts        # Shared test data URLs
-   index.ts       # Test barrel
-   mocks/         # Mock adapters (S3, Redis) for testing
-   schemas/       # CHEX-generated test schemas (.d.ts + .json)
-   integration/   # End-to-end tests (CRUD, operators, joins, edge cases)
- ```
-
- ### Dependencies
-
- - **`@vyckr/ttid`** — Time-based unique ID system. `TTID.generate()` creates new IDs; `TTID.generate(existingId)` creates a versioned ID sharing the same creation-time prefix.
- - **`@vyckr/chex`** — Schema validation. Generates `interface` declarations in `.d.ts` files. Generic constraints must use `Record<string, any>` (not `Record<string, unknown>`) to accept these interfaces.
- - **`Bun.Glob`** — Pattern matching for queries. Does NOT support negation extglob `!(pattern)`. Operators like `$ne`, `$gt`, `$lt` use broad globs with post-filtering instead.
-
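As a rough illustration of that last point, a `$ne` operator can be emulated with a broad glob plus a post-filter. The sketch below uses the real `Bun.Glob` API, but the `matchNe` helper is hypothetical, not the package's query engine.

```typescript
import { Glob } from "bun"

// Hypothetical helper — illustrative only. Matches index keys
// ({field}/{value}/{ttid}) broadly, then drops the excluded value,
// since Bun.Glob cannot express !(pattern).
function matchNe(indexKeys: string[], field: string, excluded: string): string[] {
    const broad = new Glob(`${field}/*/*`)
    return indexKeys.filter(key => broad.match(key) && key.split("/")[1] !== excluded)
}

// matchNe(["name/John/01A", "name/Jane/01B"], "name", "John")
// → ["name/Jane/01B"]
```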
- ## Engineering Standards
-
- - **SOLID principles**: Single responsibility per class/method, depend on abstractions (e.g., S3/Redis adapters), open for extension without modifying core logic
- - **Clean code**: Descriptive naming, small focused functions, no dead code or commented-out blocks, DRY without premature abstraction
- - **Test discipline**: When changing `src/` code, update or add corresponding tests in `tests/` — never leave tests stale after a behaviour change
- - **Error handling**: Fail fast with meaningful errors at system boundaries; use rollback mechanisms for partial writes
- - **No magic values**: Use constants or environment variables; avoid hardcoded strings/numbers in logic
- - **Type safety**: Leverage TypeScript's type system fully — avoid `any` in implementation code, prefer narrow types, and validate at I/O boundaries
-
- ## Code Style
-
- - **Runtime**: Bun (ESNext target, ES modules)
- - **Strict TypeScript**: `strict: true`, `noImplicitReturns`, `isolatedModules`
- - **ESLint** enforces `@typescript-eslint/no-explicit-any` in `src/` and `tests/` — `any` is allowed only in type declarations (`.d.ts`)
- - **No default exports** except the main `Fylo` class
- - Prefer `class` with `static` methods for modules (no standalone functions)
- - Use the `_ttid` branded type for document IDs — never plain `string`
- - Prefix internal/test type names with an underscore: `_post`, `_album`, `_storeQuery`
- - Type declarations live in `src/types/*.d.ts` — keep them separate from implementation
-
- ## Build & Test
-
- ```bash
- bun test          # Run all tests
- bun run build     # Compile TypeScript
- bun run typecheck # Type-check without emitting
- bun run lint      # ESLint
- ```
-
- - Tests use `bun:test` — `describe`, `test`, `expect`, `mock`, `beforeAll`, `afterAll`
- - S3 and Redis are mocked via `mock.module()` in every test file, using `tests/mocks/s3.ts` and `tests/mocks/redis.ts`
- - Test schemas live in `tests/schemas/*.d.ts` as global `interface` declarations (generated by CHEX)
- - Test data URLs are centralized in `tests/data.ts`
-
- ## Conventions
-
- - Collection names may contain hyphens (e.g., `ec-test`, `jm-album`) — the parser supports this
- - Nested field access in SQL uses dot notation (`address.city`), which the parser converts to slash-separated paths (`address/city`)
- - `putData` creates documents; `patchDoc` updates them (deletes old keys, writes new ones)
- - `getDocData` retrieves keys for a specific TTID — it filters by the exact ID, not just the prefix
- - Query `$ops` use OR semantics — a document matches if it satisfies at least one operator
- - `$limit` on queries without `$ops` uses S3 `maxKeys`; with `$ops` it post-filters after glob matching
-
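A hypothetical query object tying these removed conventions together; only the `$ops`, `$gt`/`$lt`, and `$limit` names come from the guidelines above, the surrounding shape is an assumption.

```typescript
// Hypothetical query shape — illustrative only.
const query = {
    age: {
        // OR semantics: a document matches if age > 65 OR age < 18
        $ops: [{ $gt: 65 }, { $lt: 18 }]
    },
    // with $ops present, $limit post-filters after glob matching;
    // without $ops it maps to S3 maxKeys
    $limit: 10
}
```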
- ## Environment Variables
-
- | Variable | Purpose |
- |----------|---------|
- | `BUCKET_PREFIX` | S3 bucket name prefix |
- | `S3_ACCESS_KEY_ID` / `AWS_ACCESS_KEY_ID` | S3 credentials |
- | `S3_SECRET_ACCESS_KEY` / `AWS_SECRET_ACCESS_KEY` | S3 credentials |
- | `S3_REGION` / `AWS_REGION` | S3 region |
- | `S3_ENDPOINT` / `AWS_ENDPOINT` | S3 endpoint (for compatible stores) |
- | `REDIS_URL` | Redis connection URL |
- | `LOGGING` | Enable debug logging |
- | `STRICT` | Enable schema validation via CHEX |
+ If a repo-local instruction file adds stricter rules, follow the repo-local rule.
package/AGENTS.md ADDED
@@ -0,0 +1,3 @@
+ Follow the shared workspace instructions in [../instructions.md](../instructions.md) and shared context in [../memory.md](../memory.md).
+
+ Repo-local rules may add to or override the shared files when explicitly stated.
package/CLAUDE.md ADDED
@@ -0,0 +1,3 @@
+ Follow the shared workspace instructions in [../instructions.md](../instructions.md) and shared context in [../memory.md](../memory.md).
+
+ Repo-local rules may add to or override the shared files when explicitly stated.
package/README.md CHANGED
@@ -1,11 +1,13 @@
  # Fylo
 
- S3-backed NoSQL document store with SQL parsing, Redis pub/sub for real-time events, and a CLI.
+ S3-backed NoSQL document store with SQL parsing, Redis-backed write coordination and pub/sub for real-time events, and a CLI.
 
  Documents are stored as **S3 key paths** — not file contents. Each document produces two keys per field: a **data key** (`{ttid}/{field}/{value}`) for full-doc retrieval and an **index key** (`{field}/{value}/{ttid}`) for query lookups. This enables fast reads and filtered queries without a traditional database engine.
 
  Built for **serverless** runtimes (AWS Lambda, Cloudflare Workers) — no persistent in-memory state, lazy connections, minimal cold-start overhead.
 
+ Writes are coordinated through Redis before they are flushed to S3. By default the high-level CRUD methods wait for the queued write to be processed so existing code can continue to behave synchronously. If you want fire-and-forget semantics, pass `{ wait: false }` and process queued jobs with a worker or `processQueuedWrites()`.
+
  ## Install
 
  ```bash
@@ -21,7 +23,15 @@ bun add @delma/fylo
  | `S3_SECRET_ACCESS_KEY` / `AWS_SECRET_ACCESS_KEY` | S3 credentials |
  | `S3_REGION` / `AWS_REGION` | S3 region |
  | `S3_ENDPOINT` / `AWS_ENDPOINT` | S3 endpoint (for LocalStack, MinIO, etc.) |
- | `REDIS_URL` | Redis connection URL (default: `redis://localhost:6379`) |
+ | `REDIS_URL` | Redis connection URL used for pub/sub, document locks, and queued write coordination |
+ | `FYLO_WRITE_MAX_ATTEMPTS` | Maximum retry attempts before a queued job is dead-lettered |
+ | `FYLO_WRITE_RETRY_BASE_MS` | Base retry delay used for exponential backoff between recovery attempts |
+ | `FYLO_WORKER_ID` | Optional stable identifier for a write worker process |
+ | `FYLO_WORKER_BATCH_SIZE` | Number of queued jobs a worker pulls per read loop |
+ | `FYLO_WORKER_BLOCK_MS` | Redis stream block time while waiting on new jobs |
+ | `FYLO_WORKER_RECOVER_ON_START` | Whether the worker reclaims stale pending jobs on startup |
+ | `FYLO_WORKER_RECOVER_IDLE_MS` | Minimum idle time before a pending job is reclaimed |
+ | `FYLO_WORKER_STOP_WHEN_IDLE` | Exit the worker loop when no jobs are available |
  | `LOGGING` | Enable debug logging |
  | `STRICT` | Enable schema validation via CHEX |
 
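For example, a worker deployment might tune these knobs like so; the variable names come from the table above, the values are illustrative only.

```bash
# Illustrative values — tune for your workload.
FYLO_WORKER_ID=worker-1 \
FYLO_WORKER_BATCH_SIZE=10 \
FYLO_WORKER_BLOCK_MS=5000 \
FYLO_WRITE_MAX_ATTEMPTS=5 \
FYLO_WORKER_RECOVER_ON_START=true \
bun run worker
```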
@@ -69,6 +79,44 @@ const deleted = await fylo.delDocs<_user>("users", {
  await Fylo.dropCollection("users")
  ```
 
+ ### Queued Writes
+
+ ```typescript
+ const fylo = new Fylo()
+
+ // Default behavior waits for the queued write to finish.
+ const _id = await fylo.putData("users", { name: "John Doe" })
+
+ // Async mode returns the queued job immediately.
+ const queued = await fylo.putData("users", { name: "Jane Doe" }, { wait: false })
+
+ // Poll status if you need to track progress.
+ const status = await fylo.getJobStatus(queued.jobId)
+
+ // Process pending writes in-process when you are not running a separate worker.
+ await fylo.processQueuedWrites()
+ ```
+
+ When `wait: false` is used, the job is durable in Redis but the document is not visible in S3 until a worker commits it.
+
+ Queued jobs that fail are left pending for recovery. Recovered jobs retry up to `FYLO_WRITE_MAX_ATTEMPTS` times before being moved to a dead-letter stream. You can inspect dead letters with `getDeadLetters()` and reclaim stale pending jobs with `processQueuedWrites(count, true)`.
+
+ Operational helpers:
+
+ - `getQueueStats()` returns current queue, pending, and dead-letter counts
+ - `getDeadLetters()` lists exhausted jobs
+ - `replayDeadLetter(streamId)` moves a dead-lettered job back into the main queue
+
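A sketch of a periodic maintenance pass built from these helpers. The loop itself is an assumption; the method names come from the README above, and the dead-letter fields (`streamId`, `job`, `reason`) follow the adapter's `DeadLetterJob` shape shown later in this diff.

```typescript
// Illustrative maintenance sweep — not part of the package.
const fylo = new Fylo()

const stats = await fylo.getQueueStats()
console.log(`queued=${stats.queued} pending=${stats.pending} dead=${stats.deadLetters}`)

// Reclaim stale pending jobs (the second argument enables recovery).
if (stats.pending > 0) await fylo.processQueuedWrites(10, true)

// Inspect exhausted jobs and requeue the ones worth retrying.
for (const dead of await fylo.getDeadLetters()) {
    console.warn(`dead letter ${dead.job.jobId}: ${dead.reason ?? "unknown"}`)
    // await fylo.replayDeadLetter(dead.streamId) // requeue selectively
}
```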
+ ### Worker
+
+ Run a dedicated write worker when you want queued writes to be flushed outside the request path:
+
+ ```bash
+ bun run worker
+ ```
+
+ The worker entrypoint lives at [src/worker.ts](./src/worker.ts) and continuously drains the Redis stream, recovers stale pending jobs on startup, and respects the retry/dead-letter settings above.
+
  ### CRUD — SQL API
 
  ```typescript
@@ -160,7 +208,9 @@ for await (const doc of Fylo.exportBulkData<_user>("users")) {
 
  ### Rollback
 
- Every write is tracked as a transaction. If a batch write partially fails, Fylo automatically rolls back. You can also trigger it manually:
+ `rollback()` is now a legacy escape hatch.
+
+ Fylo still keeps best-effort rollback data for writes performed by the current instance. This is mainly useful for in-process failures and test workflows:
 
  ```typescript
  const fylo = new Fylo()
@@ -168,6 +218,15 @@ await fylo.putData("users", { name: "test" })
  await fylo.rollback() // undoes all writes in this instance
  ```
 
+ For queued writes, prefer:
+
+ - `getJobStatus()` to inspect an individual write
+ - `processQueuedWrites(count, true)` to recover stale pending jobs
+ - `getDeadLetters()` to inspect exhausted jobs
+ - compensating writes instead of `rollback()` after a commit
+
+ `rollback()` may be removed from the main queued-write path in a future major release.
+
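A sketch of the compensating-write pattern recommended above, assuming the `delDocs` API shown earlier; the query shape and the `"done"` status value are illustrative assumptions.

```typescript
// Illustrative compensating write — undo a committed insert going forward,
// instead of calling rollback(). Query shape and "done" status are assumed.
const queued = await fylo.putData("users", { name: "Jane Doe" }, { wait: false })
await fylo.processQueuedWrites()

const job = await fylo.getJobStatus(queued.jobId)
if (job && job.status === "done") {
    // the write is already in S3 — issue a forward-looking delete
    await fylo.delDocs("users", { name: "Jane Doe" })
}
```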
  ### CLI
 
  ```bash
package/package.json CHANGED
@@ -1,15 +1,17 @@
  {
      "name": "@delma/fylo",
-     "version": "1.0.1",
+     "version": "1.1.1",
      "main": "./dist/index.js",
      "types": "./dist/types/index.d.ts",
      "bin": {
-         "fylo.query": "./dist/cli/index.js"
+         "fylo.query": "./dist/cli/index.js",
+         "fylo.worker": "./dist/worker.js"
      },
      "scripts": {
          "build": "tsc",
          "test": "bun test",
          "typecheck": "tsc --noEmit",
+         "worker": "bun run ./src/worker.ts",
          "lint": "eslint src tests",
          "format": "prettier --write src tests"
      },
@@ -23,8 +25,8 @@
          "prettier": "^3.0.0"
      },
      "dependencies": {
-         "@vyckr/ttid": "1.3.1",
-         "@vyckr/chex": "0.3.0"
+         "@delma/ttid": "1.3.4",
+         "@delma/chex": "0.3.3"
      },
      "type": "module",
      "peerDependencies": {
@@ -34,7 +36,7 @@
          "type": "git",
          "url": "git+https://github.com/Chidelma/Fylo.git"
      },
-     "homepage": "https://fylo.vyckr.com",
+     "homepage": "https://fylo.del.ma",
      "license": "MIT",
      "keywords": [
          "storage",
package/src/adapters/redis.ts CHANGED
@@ -1,8 +1,15 @@
  import { RedisClient } from "bun";
  import { S3 } from "./s3";
+ import type { DeadLetterJob, QueueStats, StreamJobEntry, WriteJob, WriteJobStatus } from "../types/write-queue";
 
  export class Redis {
 
+     static readonly WRITE_STREAM = 'fylo:writes'
+
+     static readonly WRITE_GROUP = 'fylo-workers'
+
+     static readonly DEAD_LETTER_STREAM = 'fylo:writes:dead'
+
      private client: RedisClient
      private static LOGGING = process.env.LOGGING
 
@@ -31,6 +38,41 @@ export class Redis {
          this.client.connect()
      }
 
+     private async ensureWriteGroup() {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         try {
+             await this.client.send('XGROUP', ['CREATE', Redis.WRITE_STREAM, Redis.WRITE_GROUP, '$', 'MKSTREAM'])
+         } catch(err) {
+             if(!(err instanceof Error) || !err.message.includes('BUSYGROUP')) throw err
+         }
+     }
+
+     private static hashKey(jobId: string) {
+         return `fylo:job:${jobId}`
+     }
+
+     private static docKey(collection: string, docId: _ttid) {
+         return `fylo:doc:${collection}:${docId}`
+     }
+
+     private static lockKey(collection: string, docId: _ttid) {
+         return `fylo:lock:${collection}:${docId}`
+     }
+
+     private static parseHash(values: unknown): Record<string, string> {
+         if(!Array.isArray(values)) return {}
+
+         const parsed: Record<string, string> = {}
+
+         for(let i = 0; i < values.length; i += 2) {
+             parsed[String(values[i])] = String(values[i + 1] ?? '')
+         }
+
+         return parsed
+     }
+
      async publish(collection: string, action: 'insert' | 'delete', keyId: string | _ttid) {
 
          if(this.client.connected) {
@@ -48,6 +90,310 @@ export class Redis {
          return result === 'OK'
      }
 
+     async enqueueWrite<T extends Record<string, any>>(job: WriteJob<T>) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         await this.ensureWriteGroup()
+
+         const now = Date.now()
+         const payload = JSON.stringify(job.payload)
+
+         await this.client.send('HSET', [
+             Redis.hashKey(job.jobId),
+             'jobId', job.jobId,
+             'collection', job.collection,
+             'docId', job.docId,
+             'operation', job.operation,
+             'payload', payload,
+             'status', job.status,
+             'attempts', String(job.attempts),
+             'createdAt', String(job.createdAt),
+             'updatedAt', String(now),
+             'nextAttemptAt', String(job.nextAttemptAt ?? now)
+         ])
+
+         await this.client.send('HSET', [
+             Redis.docKey(job.collection, job.docId),
+             'status', 'queued',
+             'lastJobId', job.jobId,
+             'updatedAt', String(now)
+         ])
+
+         return await this.client.send('XADD', [
+             Redis.WRITE_STREAM,
+             '*',
+             'jobId', job.jobId,
+             'collection', job.collection,
+             'docId', job.docId,
+             'operation', job.operation
+         ])
+     }
+
+     async readWriteJobs(workerId: string, count: number = 1, blockMs: number = 1000): Promise<Array<StreamJobEntry>> {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         await this.ensureWriteGroup()
+
+         const rows = await this.client.send('XREADGROUP', [
+             'GROUP', Redis.WRITE_GROUP, workerId,
+             'COUNT', String(count),
+             'BLOCK', String(blockMs),
+             'STREAMS', Redis.WRITE_STREAM, '>'
+         ])
+
+         if(!Array.isArray(rows) || rows.length === 0) return []
+
+         const items: Array<StreamJobEntry> = []
+
+         for(const streamRow of rows as unknown[]) {
+             if(!Array.isArray(streamRow) || streamRow.length < 2) continue
+             const entries = streamRow[1]
+             if(!Array.isArray(entries)) continue
+
+             for(const entry of entries as unknown[]) {
+                 if(!Array.isArray(entry) || entry.length < 2) continue
+                 const streamId = String(entry[0])
+                 const fields = Redis.parseHash(entry[1])
+                 const job = await this.getJob(fields.jobId)
+                 if(job) items.push({ streamId, job })
+             }
+         }
+
+         return items
+     }
+
+     async ackWriteJob(streamId: string) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         await this.client.send('XACK', [Redis.WRITE_STREAM, Redis.WRITE_GROUP, streamId])
+     }
+
+     async deadLetterWriteJob(streamId: string, job: WriteJob, reason?: string) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const failedAt = Date.now()
+
+         await this.client.send('XADD', [
+             Redis.DEAD_LETTER_STREAM,
+             '*',
+             'jobId', job.jobId,
+             'collection', job.collection,
+             'docId', job.docId,
+             'operation', job.operation,
+             'reason', reason ?? '',
+             'failedAt', String(failedAt)
+         ])
+
+         await this.ackWriteJob(streamId)
+     }
+
+     async claimPendingJobs(workerId: string, minIdleMs: number = 30_000, count: number = 10): Promise<Array<StreamJobEntry>> {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         await this.ensureWriteGroup()
+
+         const result = await this.client.send('XAUTOCLAIM', [
+             Redis.WRITE_STREAM,
+             Redis.WRITE_GROUP,
+             workerId,
+             String(minIdleMs),
+             '0-0',
+             'COUNT',
+             String(count)
+         ])
+
+         if(!Array.isArray(result) || result.length < 2 || !Array.isArray(result[1])) return []
+
+         const items: Array<StreamJobEntry> = []
+
+         for(const entry of result[1] as unknown[]) {
+             if(!Array.isArray(entry) || entry.length < 2) continue
+             const streamId = String(entry[0])
+             const fields = Redis.parseHash(entry[1])
+             const job = await this.getJob(fields.jobId)
+             if(job) items.push({ streamId, job })
+         }
+
+         return items
+     }
+
+     async setJobStatus(jobId: string, status: WriteJobStatus, extra: Partial<Pick<WriteJob, 'workerId' | 'error' | 'attempts' | 'nextAttemptAt'>> = {}) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const args = [
+             Redis.hashKey(jobId),
+             'status', status,
+             'updatedAt', String(Date.now())
+         ]
+
+         if(extra.workerId) args.push('workerId', extra.workerId)
+         if(extra.error) args.push('error', extra.error)
+         if(typeof extra.attempts === 'number') args.push('attempts', String(extra.attempts))
+         if(typeof extra.nextAttemptAt === 'number') args.push('nextAttemptAt', String(extra.nextAttemptAt))
+
+         await this.client.send('HSET', args)
+     }
+
+     async setDocStatus(collection: string, docId: _ttid, status: WriteJobStatus, jobId?: string) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const args = [
+             Redis.docKey(collection, docId),
+             'status', status,
+             'updatedAt', String(Date.now())
+         ]
+
+         if(jobId) args.push('lastJobId', jobId)
+
+         await this.client.send('HSET', args)
+     }
+
+     async getJob(jobId: string): Promise<WriteJob | null> {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const hash = Redis.parseHash(await this.client.send('HGETALL', [Redis.hashKey(jobId)]))
+
+         if(Object.keys(hash).length === 0) return null
+
+         return {
+             jobId: hash.jobId,
+             collection: hash.collection,
+             docId: hash.docId as _ttid,
+             operation: hash.operation as WriteJob['operation'],
+             payload: JSON.parse(hash.payload),
+             status: hash.status as WriteJobStatus,
+             attempts: Number(hash.attempts ?? 0),
+             createdAt: Number(hash.createdAt ?? 0),
+             updatedAt: Number(hash.updatedAt ?? 0),
+             nextAttemptAt: Number(hash.nextAttemptAt ?? 0) || undefined,
+             workerId: hash.workerId || undefined,
+             error: hash.error || undefined
+         }
+     }
+
+     async getDocStatus(collection: string, docId: _ttid) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const hash = Redis.parseHash(await this.client.send('HGETALL', [Redis.docKey(collection, docId)]))
+
+         return Object.keys(hash).length > 0 ? hash : null
+     }
+
+     async readDeadLetters(count: number = 10): Promise<Array<DeadLetterJob>> {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const rows = await this.client.send('XRANGE', [Redis.DEAD_LETTER_STREAM, '-', '+', 'COUNT', String(count)])
+
+         if(!Array.isArray(rows)) return []
+
+         const items: Array<DeadLetterJob> = []
+
+         for(const row of rows as unknown[]) {
+             if(!Array.isArray(row) || row.length < 2) continue
+             const streamId = String(row[0])
+             const fields = Redis.parseHash(row[1])
+             const job = await this.getJob(fields.jobId)
+
+             if(job) {
+                 items.push({
+                     streamId,
+                     job,
+                     reason: fields.reason || undefined,
+                     failedAt: Number(fields.failedAt ?? 0)
+                 })
+             }
+         }
+
+         return items
+     }
+
+     async replayDeadLetter(streamId: string): Promise<WriteJob | null> {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const rows = await this.client.send('XRANGE', [Redis.DEAD_LETTER_STREAM, streamId, streamId, 'COUNT', '1'])
+
+         if(!Array.isArray(rows) || rows.length === 0) return null
+
+         const row = rows[0]
+         if(!Array.isArray(row) || row.length < 2) return null
+
+         const fields = Redis.parseHash(row[1])
+         const job = await this.getJob(fields.jobId)
+
+         if(!job) return null
+
+         const replayed: WriteJob = {
+             ...job,
+             status: 'queued',
+             error: undefined,
+             workerId: undefined,
+             attempts: 0,
+             updatedAt: Date.now(),
+             nextAttemptAt: Date.now()
+         }
+
+         await this.enqueueWrite(replayed)
+         await this.client.send('XDEL', [Redis.DEAD_LETTER_STREAM, streamId])
+
+         return replayed
+     }
+
+     async getQueueStats(): Promise<QueueStats> {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         await this.ensureWriteGroup()
+
+         const [queuedRaw, deadRaw, pendingRaw] = await Promise.all([
+             this.client.send('XLEN', [Redis.WRITE_STREAM]),
+             this.client.send('XLEN', [Redis.DEAD_LETTER_STREAM]),
+             this.client.send('XPENDING', [Redis.WRITE_STREAM, Redis.WRITE_GROUP])
+         ])
+
+         const pending = Array.isArray(pendingRaw) ? Number(pendingRaw[0] ?? 0) : 0
+
+         return {
+             queued: Number(queuedRaw ?? 0),
+             pending,
+             deadLetters: Number(deadRaw ?? 0)
+         }
+     }
+
+     async acquireDocLock(collection: string, docId: _ttid, jobId: string, ttlSeconds: number = 60) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const result = await this.client.send('SET', [
+             Redis.lockKey(collection, docId),
+             jobId,
+             'NX',
+             'EX',
+             String(ttlSeconds)
+         ])
+
+         return result === 'OK'
+     }
+
+     async releaseDocLock(collection: string, docId: _ttid, jobId: string) {
+
+         if(!this.client.connected) throw new Error('Redis not connected!')
+
+         const key = Redis.lockKey(collection, docId)
+         const current = await this.client.send('GET', [key])
+         if(current === jobId) await this.client.send('DEL', [key])
+     }
+
      async *subscribe(collection: string) {
 
          if(!this.client.connected) throw new Error('Redis not connected!')
@@ -1,5 +1,5 @@
  import { Walker } from "./walker"
- import TTID from "@vyckr/ttid"
+ import TTID from "@delma/ttid"
  import { S3 } from "../adapters/s3"
  import { Redis } from "../adapters/redis"
  import { Cipher } from "../adapters/cipher"
package/src/core/format.ts CHANGED
@@ -1,4 +1,4 @@
- import TTID from '@vyckr/ttid'
+ import TTID from '@delma/ttid'
 
  class Format {
      static table(docs: Record<string, any>) {
package/src/core/walker.ts CHANGED
@@ -1,5 +1,5 @@
  import { S3 } from "../adapters/s3"
- import TTID from "@vyckr/ttid"
+ import TTID from "@delma/ttid"
  import { Redis } from "../adapters/redis"
 
  export class Walker {