@delma/fylo 2.0.1 → 2.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +206 -261
- package/dist/adapters/cipher.js +155 -0
- package/dist/adapters/cipher.js.map +1 -0
- package/dist/core/collection.js +6 -0
- package/dist/core/collection.js.map +1 -0
- package/dist/core/directory.js +48 -0
- package/dist/core/directory.js.map +1 -0
- package/dist/core/doc-id.js +15 -0
- package/dist/core/doc-id.js.map +1 -0
- package/dist/core/extensions.js +16 -0
- package/dist/core/extensions.js.map +1 -0
- package/dist/core/format.js +355 -0
- package/dist/core/format.js.map +1 -0
- package/dist/core/parser.js +764 -0
- package/dist/core/parser.js.map +1 -0
- package/dist/core/query.js +47 -0
- package/dist/core/query.js.map +1 -0
- package/dist/engines/s3-files/documents.js +62 -0
- package/dist/engines/s3-files/documents.js.map +1 -0
- package/dist/engines/s3-files/filesystem.js +165 -0
- package/dist/engines/s3-files/filesystem.js.map +1 -0
- package/dist/engines/s3-files/query.js +235 -0
- package/dist/engines/s3-files/query.js.map +1 -0
- package/dist/engines/s3-files/types.js +2 -0
- package/dist/engines/s3-files/types.js.map +1 -0
- package/dist/engines/s3-files.js +629 -0
- package/dist/engines/s3-files.js.map +1 -0
- package/dist/engines/types.js +2 -0
- package/dist/engines/types.js.map +1 -0
- package/dist/index.js +562 -0
- package/dist/index.js.map +1 -0
- package/dist/sync.js +18 -0
- package/dist/sync.js.map +1 -0
- package/dist/types/fylo.d.ts +179 -0
- package/{src → dist}/types/node-runtime.d.ts +1 -0
- package/package.json +3 -6
- package/.env.example +0 -16
- package/.github/copilot-instructions.md +0 -3
- package/.github/prompts/release.prompt.md +0 -10
- package/.github/workflows/ci.yml +0 -37
- package/.github/workflows/publish.yml +0 -91
- package/.prettierrc +0 -7
- package/AGENTS.md +0 -3
- package/CLAUDE.md +0 -3
- package/eslint.config.js +0 -32
- package/src/CLI +0 -39
- package/src/adapters/cipher.ts +0 -180
- package/src/adapters/redis.ts +0 -487
- package/src/adapters/s3.ts +0 -61
- package/src/core/collection.ts +0 -5
- package/src/core/directory.ts +0 -387
- package/src/core/extensions.ts +0 -21
- package/src/core/format.ts +0 -457
- package/src/core/parser.ts +0 -901
- package/src/core/query.ts +0 -53
- package/src/core/walker.ts +0 -174
- package/src/core/write-queue.ts +0 -59
- package/src/engines/s3-files.ts +0 -1068
- package/src/engines/types.ts +0 -21
- package/src/index.ts +0 -1727
- package/src/migrate-cli.ts +0 -22
- package/src/migrate.ts +0 -74
- package/src/types/fylo.d.ts +0 -261
- package/src/types/write-queue.ts +0 -42
- package/src/worker.ts +0 -18
- package/src/workers/write-worker.ts +0 -120
- package/tests/collection/truncate.test.js +0 -35
- package/tests/data.js +0 -97
- package/tests/index.js +0 -14
- package/tests/integration/aws-s3-files.canary.test.js +0 -22
- package/tests/integration/create.test.js +0 -39
- package/tests/integration/delete.test.js +0 -95
- package/tests/integration/edge-cases.test.js +0 -158
- package/tests/integration/encryption.test.js +0 -131
- package/tests/integration/export.test.js +0 -46
- package/tests/integration/join-modes.test.js +0 -154
- package/tests/integration/migration.test.js +0 -38
- package/tests/integration/nested.test.js +0 -142
- package/tests/integration/operators.test.js +0 -122
- package/tests/integration/queue.test.js +0 -83
- package/tests/integration/read.test.js +0 -119
- package/tests/integration/rollback.test.js +0 -60
- package/tests/integration/s3-files.test.js +0 -192
- package/tests/integration/update.test.js +0 -99
- package/tests/mocks/cipher.js +0 -40
- package/tests/mocks/redis.js +0 -123
- package/tests/mocks/s3.js +0 -80
- package/tests/schemas/album.d.ts +0 -5
- package/tests/schemas/album.json +0 -5
- package/tests/schemas/comment.d.ts +0 -7
- package/tests/schemas/comment.json +0 -7
- package/tests/schemas/photo.d.ts +0 -7
- package/tests/schemas/photo.json +0 -7
- package/tests/schemas/post.d.ts +0 -6
- package/tests/schemas/post.json +0 -6
- package/tests/schemas/tip.d.ts +0 -7
- package/tests/schemas/tip.json +0 -7
- package/tests/schemas/todo.d.ts +0 -6
- package/tests/schemas/todo.json +0 -6
- package/tests/schemas/user.d.ts +0 -23
- package/tests/schemas/user.json +0 -23
- package/tsconfig.json +0 -21
- package/tsconfig.typecheck.json +0 -31
- /package/{src → dist}/types/bun-runtime.d.ts +0 -0
- /package/{src → dist}/types/index.d.ts +0 -0
- /package/{src → dist}/types/query.d.ts +0 -0
- /package/{src → dist}/types/vendor-modules.d.ts +0 -0
package/src/index.ts
DELETED
|
@@ -1,1727 +0,0 @@
|
|
|
1
|
-
/* eslint-disable @typescript-eslint/explicit-function-return-type */
|
|
2
|
-
import { Query } from './core/query'
|
|
3
|
-
import { Parser } from './core/parser'
|
|
4
|
-
import { Dir } from './core/directory'
|
|
5
|
-
import TTID from '@delma/ttid'
|
|
6
|
-
import Gen from '@delma/chex'
|
|
7
|
-
import { Walker } from './core/walker'
|
|
8
|
-
import { S3 } from './adapters/s3'
|
|
9
|
-
import { Cipher } from './adapters/cipher'
|
|
10
|
-
import { Redis } from './adapters/redis'
|
|
11
|
-
import { WriteQueue } from './core/write-queue'
|
|
12
|
-
import { S3FilesEngine } from './engines/s3-files'
|
|
13
|
-
import './core/format'
|
|
14
|
-
import './core/extensions'
|
|
15
|
-
import type { QueueStats, QueuedWriteResult, WriteJob } from './types/write-queue'
|
|
16
|
-
import type { StreamJobEntry } from './types/write-queue'
|
|
17
|
-
import type { FyloStorageEngineKind } from './engines/types'
|
|
18
|
-
|
|
19
|
-
export default class Fylo {
|
|
20
|
-
private static LOGGING = process.env.LOGGING
|
|
21
|
-
|
|
22
|
-
private static MAX_CPUS = navigator.hardwareConcurrency
|
|
23
|
-
|
|
24
|
-
private static readonly STRICT = process.env.STRICT
|
|
25
|
-
|
|
26
|
-
private static ttidLock: Promise<void> = Promise.resolve()
|
|
27
|
-
|
|
28
|
-
private static readonly SCHEMA_DIR = process.env.SCHEMA_DIR
|
|
29
|
-
|
|
30
|
-
/** Collections whose schema `$encrypted` config has already been loaded. */
|
|
31
|
-
private static readonly loadedEncryption: Set<string> = new Set()
|
|
32
|
-
|
|
33
|
-
private static _queueRedis: Redis | null = null
|
|
34
|
-
|
|
35
|
-
private static rollbackWarningShown = false
|
|
36
|
-
|
|
37
|
-
private static readonly MAX_WRITE_ATTEMPTS = Number(process.env.FYLO_WRITE_MAX_ATTEMPTS ?? 3)
|
|
38
|
-
|
|
39
|
-
private static readonly WRITE_RETRY_BASE_MS = Number(process.env.FYLO_WRITE_RETRY_BASE_MS ?? 10)
|
|
40
|
-
|
|
41
|
-
private readonly dir: Dir
|
|
42
|
-
|
|
43
|
-
private readonly engineKind: FyloStorageEngineKind
|
|
44
|
-
|
|
45
|
-
private readonly s3Files?: S3FilesEngine
|
|
46
|
-
|
|
47
|
-
constructor(options: { engine?: FyloStorageEngineKind; s3FilesRoot?: string } = {}) {
|
|
48
|
-
this.dir = new Dir()
|
|
49
|
-
this.engineKind = options.engine ?? Fylo.defaultEngineKind()
|
|
50
|
-
if (this.engineKind === 's3-files') this.s3Files = new S3FilesEngine(options.s3FilesRoot)
|
|
51
|
-
}
|
|
52
|
-
|
|
53
|
-
private static get queueRedis(): Redis {
|
|
54
|
-
if (!Fylo._queueRedis) Fylo._queueRedis = new Redis()
|
|
55
|
-
return Fylo._queueRedis
|
|
56
|
-
}
|
|
57
|
-
|
|
58
|
-
private static defaultEngineKind(): FyloStorageEngineKind {
|
|
59
|
-
return process.env.FYLO_STORAGE_ENGINE === 's3-files' ? 's3-files' : 'legacy-s3'
|
|
60
|
-
}
|
|
61
|
-
|
|
62
|
-
private static get defaultS3Files(): S3FilesEngine {
|
|
63
|
-
return new S3FilesEngine(process.env.FYLO_S3FILES_ROOT)
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
private assertS3FilesEngine(): S3FilesEngine {
|
|
67
|
-
if (!this.s3Files) throw new Error('S3 Files engine is not configured')
|
|
68
|
-
return this.s3Files
|
|
69
|
-
}
|
|
70
|
-
|
|
71
|
-
private ensureLegacyQueue(feature: string): Redis {
|
|
72
|
-
if (this.engineKind === 's3-files')
|
|
73
|
-
throw new Error(`${feature} is not supported in s3-files engine`)
|
|
74
|
-
return Fylo.queueRedis
|
|
75
|
-
}
|
|
76
|
-
|
|
77
|
-
/**
|
|
78
|
-
* Executes a SQL query and returns the results.
|
|
79
|
-
* @param SQL The SQL query to execute.
|
|
80
|
-
* @returns The results of the query.
|
|
81
|
-
*/
|
|
82
|
-
async executeSQL<
|
|
83
|
-
T extends Record<string, any>,
|
|
84
|
-
U extends Record<string, any> = Record<string, unknown>
|
|
85
|
-
>(SQL: string) {
|
|
86
|
-
const op = SQL.match(/^(SELECT|INSERT|UPDATE|DELETE|CREATE|DROP)/i)
|
|
87
|
-
|
|
88
|
-
if (!op) throw new Error('Missing SQL Operation')
|
|
89
|
-
|
|
90
|
-
switch (op.shift()) {
|
|
91
|
-
case 'CREATE':
|
|
92
|
-
return await this.createCollection(
|
|
93
|
-
(Parser.parse(SQL) as _storeDelete<T>).$collection!
|
|
94
|
-
)
|
|
95
|
-
case 'DROP':
|
|
96
|
-
return await this.dropCollection(
|
|
97
|
-
(Parser.parse(SQL) as _storeDelete<T>).$collection!
|
|
98
|
-
)
|
|
99
|
-
case 'SELECT':
|
|
100
|
-
const query = Parser.parse<T>(SQL) as _storeQuery<T>
|
|
101
|
-
if (SQL.includes('JOIN')) return await this.joinDocs(query as _join<T, U>)
|
|
102
|
-
const selCol = (query as _storeQuery<T>).$collection
|
|
103
|
-
delete (query as _storeQuery<T>).$collection
|
|
104
|
-
let docs: Record<string, unknown> | Array<_ttid> = query.$onlyIds
|
|
105
|
-
? new Array<_ttid>()
|
|
106
|
-
: {}
|
|
107
|
-
for await (const data of this.findDocs(
|
|
108
|
-
selCol! as string,
|
|
109
|
-
query as _storeQuery<T>
|
|
110
|
-
).collect()) {
|
|
111
|
-
if (typeof data === 'object') {
|
|
112
|
-
docs = Object.appendGroup(docs, data)
|
|
113
|
-
} else (docs as Array<_ttid>).push(data as _ttid)
|
|
114
|
-
}
|
|
115
|
-
return docs
|
|
116
|
-
case 'INSERT':
|
|
117
|
-
const insert = Parser.parse<T>(SQL) as _storeInsert<T>
|
|
118
|
-
const insCol = insert.$collection
|
|
119
|
-
delete insert.$collection
|
|
120
|
-
return await this.putData(insCol!, insert.$values)
|
|
121
|
-
case 'UPDATE':
|
|
122
|
-
const update = Parser.parse<T>(SQL) as _storeUpdate<T>
|
|
123
|
-
const updateCol = update.$collection
|
|
124
|
-
delete update.$collection
|
|
125
|
-
return await this.patchDocs(updateCol!, update)
|
|
126
|
-
case 'DELETE':
|
|
127
|
-
const del = Parser.parse<T>(SQL) as _storeDelete<T>
|
|
128
|
-
const delCol = del.$collection
|
|
129
|
-
delete del.$collection
|
|
130
|
-
return await this.delDocs(delCol!, del)
|
|
131
|
-
default:
|
|
132
|
-
throw new Error('Invalid Operation')
|
|
133
|
-
}
|
|
134
|
-
}
|
|
135
|
-
|
|
136
|
-
/**
|
|
137
|
-
* Creates a new schema for a collection.
|
|
138
|
-
* @param collection The name of the collection.
|
|
139
|
-
*/
|
|
140
|
-
static async createCollection(collection: string) {
|
|
141
|
-
if (Fylo.defaultEngineKind() === 's3-files') {
|
|
142
|
-
await Fylo.defaultS3Files.createCollection(collection)
|
|
143
|
-
return
|
|
144
|
-
}
|
|
145
|
-
await S3.createBucket(collection)
|
|
146
|
-
}
|
|
147
|
-
|
|
148
|
-
/**
|
|
149
|
-
* Drops an existing schema for a collection.
|
|
150
|
-
* @param collection The name of the collection.
|
|
151
|
-
*/
|
|
152
|
-
static async dropCollection(collection: string) {
|
|
153
|
-
if (Fylo.defaultEngineKind() === 's3-files') {
|
|
154
|
-
await Fylo.defaultS3Files.dropCollection(collection)
|
|
155
|
-
return
|
|
156
|
-
}
|
|
157
|
-
await S3.deleteBucket(collection)
|
|
158
|
-
}
|
|
159
|
-
|
|
160
|
-
async createCollection(collection: string) {
|
|
161
|
-
if (this.engineKind === 's3-files')
|
|
162
|
-
return await this.assertS3FilesEngine().createCollection(collection)
|
|
163
|
-
return await Fylo.createCollection(collection)
|
|
164
|
-
}
|
|
165
|
-
|
|
166
|
-
async dropCollection(collection: string) {
|
|
167
|
-
if (this.engineKind === 's3-files')
|
|
168
|
-
return await this.assertS3FilesEngine().dropCollection(collection)
|
|
169
|
-
return await Fylo.dropCollection(collection)
|
|
170
|
-
}
|
|
171
|
-
|
|
172
|
-
/**
|
|
173
|
-
* Loads encrypted field config from a collection's JSON schema if not already loaded.
|
|
174
|
-
* Reads the `$encrypted` array from the schema and registers fields with Cipher.
|
|
175
|
-
* Auto-configures the Cipher key from `ENCRYPTION_KEY` env var on first use.
|
|
176
|
-
*/
|
|
177
|
-
private static async loadEncryption(collection: string): Promise<void> {
|
|
178
|
-
if (Fylo.loadedEncryption.has(collection)) return
|
|
179
|
-
Fylo.loadedEncryption.add(collection)
|
|
180
|
-
|
|
181
|
-
if (!Fylo.SCHEMA_DIR) return
|
|
182
|
-
|
|
183
|
-
try {
|
|
184
|
-
const res = await import(`${Fylo.SCHEMA_DIR}/${collection}.json`)
|
|
185
|
-
const schema = res.default as Record<string, unknown>
|
|
186
|
-
const encrypted = schema.$encrypted
|
|
187
|
-
|
|
188
|
-
if (Array.isArray(encrypted) && encrypted.length > 0) {
|
|
189
|
-
if (!Cipher.isConfigured()) {
|
|
190
|
-
const secret = process.env.ENCRYPTION_KEY
|
|
191
|
-
if (!secret)
|
|
192
|
-
throw new Error(
|
|
193
|
-
'Schema declares $encrypted fields but ENCRYPTION_KEY env var is not set'
|
|
194
|
-
)
|
|
195
|
-
if (secret.length < 32)
|
|
196
|
-
throw new Error('ENCRYPTION_KEY must be at least 32 characters long')
|
|
197
|
-
await Cipher.configure(secret)
|
|
198
|
-
}
|
|
199
|
-
Cipher.registerFields(collection, encrypted as string[])
|
|
200
|
-
}
|
|
201
|
-
} catch {
|
|
202
|
-
// No schema file found — no encryption for this collection
|
|
203
|
-
}
|
|
204
|
-
}
|
|
205
|
-
|
|
206
|
-
getDoc<T extends Record<string, any>>(collection: string, _id: _ttid, onlyId: boolean = false) {
|
|
207
|
-
if (this.engineKind === 's3-files')
|
|
208
|
-
return this.assertS3FilesEngine().getDoc<T>(collection, _id, onlyId)
|
|
209
|
-
return Fylo.getDoc<T>(collection, _id, onlyId)
|
|
210
|
-
}
|
|
211
|
-
|
|
212
|
-
findDocs<T extends Record<string, any>>(collection: string, query?: _storeQuery<T>) {
|
|
213
|
-
if (this.engineKind === 's3-files')
|
|
214
|
-
return this.assertS3FilesEngine().findDocs<T>(collection, query)
|
|
215
|
-
return Fylo.findDocs<T>(collection, query)
|
|
216
|
-
}
|
|
217
|
-
|
|
218
|
-
async joinDocs<T extends Record<string, any>, U extends Record<string, any>>(
|
|
219
|
-
join: _join<T, U>
|
|
220
|
-
) {
|
|
221
|
-
if (this.engineKind === 's3-files') return await this.assertS3FilesEngine().joinDocs(join)
|
|
222
|
-
return await Fylo.joinDocs(join)
|
|
223
|
-
}
|
|
224
|
-
|
|
225
|
-
/**
|
|
226
|
-
* Rolls back all transcations in current instance
|
|
227
|
-
* @deprecated Prefer queued write recovery, retries, dead letters, or compensating writes.
|
|
228
|
-
*/
|
|
229
|
-
async rollback() {
|
|
230
|
-
if (this.engineKind === 's3-files') return
|
|
231
|
-
if (!Fylo.rollbackWarningShown) {
|
|
232
|
-
Fylo.rollbackWarningShown = true
|
|
233
|
-
console.warn(
|
|
234
|
-
'[FYLO] rollback() is deprecated for queued-write flows. Prefer job recovery, dead letters, or compensating writes.'
|
|
235
|
-
)
|
|
236
|
-
}
|
|
237
|
-
await this.dir.executeRollback()
|
|
238
|
-
}
|
|
239
|
-
|
|
240
|
-
async getJobStatus(jobId: string) {
|
|
241
|
-
return await this.ensureLegacyQueue('getJobStatus').getJob(jobId)
|
|
242
|
-
}
|
|
243
|
-
|
|
244
|
-
async getDocStatus(collection: string, docId: _ttid) {
|
|
245
|
-
return await this.ensureLegacyQueue('getDocStatus').getDocStatus(collection, docId)
|
|
246
|
-
}
|
|
247
|
-
|
|
248
|
-
async getDeadLetters(count: number = 10) {
|
|
249
|
-
return await this.ensureLegacyQueue('getDeadLetters').readDeadLetters(count)
|
|
250
|
-
}
|
|
251
|
-
|
|
252
|
-
async getQueueStats(): Promise<QueueStats> {
|
|
253
|
-
return await this.ensureLegacyQueue('getQueueStats').getQueueStats()
|
|
254
|
-
}
|
|
255
|
-
|
|
256
|
-
async replayDeadLetter(streamId: string) {
|
|
257
|
-
return await this.ensureLegacyQueue('replayDeadLetter').replayDeadLetter(streamId)
|
|
258
|
-
}
|
|
259
|
-
|
|
260
|
-
private async waitForJob(jobId: string, timeoutMs: number = 5_000, intervalMs: number = 50) {
|
|
261
|
-
const start = Date.now()
|
|
262
|
-
|
|
263
|
-
do {
|
|
264
|
-
const job = await this.getJobStatus(jobId)
|
|
265
|
-
if (job && (job.status === 'committed' || job.status === 'failed')) return job
|
|
266
|
-
await Bun.sleep(intervalMs)
|
|
267
|
-
} while (Date.now() - start < timeoutMs)
|
|
268
|
-
|
|
269
|
-
throw new Error(`Timed out waiting for job ${jobId}`)
|
|
270
|
-
}
|
|
271
|
-
|
|
272
|
-
private async runQueuedJob<T>(
|
|
273
|
-
queued: QueuedWriteResult,
|
|
274
|
-
{
|
|
275
|
-
wait = true
|
|
276
|
-
}: {
|
|
277
|
-
wait?: boolean
|
|
278
|
-
timeoutMs?: number
|
|
279
|
-
} = {},
|
|
280
|
-
resolveValue?: () => Promise<T> | T
|
|
281
|
-
): Promise<T | QueuedWriteResult> {
|
|
282
|
-
if (this.engineKind === 's3-files') {
|
|
283
|
-
if (!wait) return queued
|
|
284
|
-
return resolveValue ? await resolveValue() : queued
|
|
285
|
-
}
|
|
286
|
-
|
|
287
|
-
if (!wait) return queued
|
|
288
|
-
|
|
289
|
-
const processed = await this.processQueuedWrites(1)
|
|
290
|
-
|
|
291
|
-
if (processed === 0) throw new Error(`No worker available to process job ${queued.jobId}`)
|
|
292
|
-
|
|
293
|
-
const job = await this.getJobStatus(queued.jobId)
|
|
294
|
-
|
|
295
|
-
if (job?.status === 'failed') {
|
|
296
|
-
throw new Error(job.error ?? `Queued job ${queued.jobId} failed`)
|
|
297
|
-
}
|
|
298
|
-
|
|
299
|
-
return resolveValue ? await resolveValue() : queued
|
|
300
|
-
}
|
|
301
|
-
|
|
302
|
-
async processQueuedWrites(count: number = 1, recover: boolean = false) {
|
|
303
|
-
this.ensureLegacyQueue('processQueuedWrites')
|
|
304
|
-
const jobs = recover
|
|
305
|
-
? await Fylo.queueRedis.claimPendingJobs(Bun.randomUUIDv7(), 30_000, count)
|
|
306
|
-
: await Fylo.queueRedis.readWriteJobs(Bun.randomUUIDv7(), count)
|
|
307
|
-
|
|
308
|
-
let processed = 0
|
|
309
|
-
|
|
310
|
-
for (const job of jobs) {
|
|
311
|
-
if (await this.processQueuedJob(job)) processed++
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
return processed
|
|
315
|
-
}
|
|
316
|
-
|
|
317
|
-
private async processQueuedJob({ streamId, job }: StreamJobEntry) {
|
|
318
|
-
if (job.nextAttemptAt && job.nextAttemptAt > Date.now()) return false
|
|
319
|
-
|
|
320
|
-
const locked = await Fylo.queueRedis.acquireDocLock(job.collection, job.docId, job.jobId)
|
|
321
|
-
if (!locked) return false
|
|
322
|
-
|
|
323
|
-
try {
|
|
324
|
-
await Fylo.queueRedis.setJobStatus(job.jobId, 'processing', {
|
|
325
|
-
attempts: job.attempts + 1
|
|
326
|
-
})
|
|
327
|
-
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'processing', job.jobId)
|
|
328
|
-
|
|
329
|
-
await this.executeQueuedWrite(job)
|
|
330
|
-
|
|
331
|
-
await Fylo.queueRedis.setJobStatus(job.jobId, 'committed')
|
|
332
|
-
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'committed', job.jobId)
|
|
333
|
-
await Fylo.queueRedis.ackWriteJob(streamId)
|
|
334
|
-
return true
|
|
335
|
-
} catch (err) {
|
|
336
|
-
const attempts = job.attempts + 1
|
|
337
|
-
const message = err instanceof Error ? err.message : String(err)
|
|
338
|
-
|
|
339
|
-
if (attempts >= Fylo.MAX_WRITE_ATTEMPTS) {
|
|
340
|
-
await Fylo.queueRedis.setJobStatus(job.jobId, 'dead-letter', {
|
|
341
|
-
error: message,
|
|
342
|
-
attempts
|
|
343
|
-
})
|
|
344
|
-
await Fylo.queueRedis.setDocStatus(
|
|
345
|
-
job.collection,
|
|
346
|
-
job.docId,
|
|
347
|
-
'dead-letter',
|
|
348
|
-
job.jobId
|
|
349
|
-
)
|
|
350
|
-
await Fylo.queueRedis.deadLetterWriteJob(
|
|
351
|
-
streamId,
|
|
352
|
-
{
|
|
353
|
-
...job,
|
|
354
|
-
attempts,
|
|
355
|
-
status: 'dead-letter',
|
|
356
|
-
error: message
|
|
357
|
-
},
|
|
358
|
-
message
|
|
359
|
-
)
|
|
360
|
-
return false
|
|
361
|
-
}
|
|
362
|
-
|
|
363
|
-
const nextAttemptAt =
|
|
364
|
-
Date.now() + Fylo.WRITE_RETRY_BASE_MS * Math.max(1, 2 ** (attempts - 1))
|
|
365
|
-
|
|
366
|
-
await Fylo.queueRedis.setJobStatus(job.jobId, 'failed', {
|
|
367
|
-
error: message,
|
|
368
|
-
attempts,
|
|
369
|
-
nextAttemptAt
|
|
370
|
-
})
|
|
371
|
-
await Fylo.queueRedis.setDocStatus(job.collection, job.docId, 'failed', job.jobId)
|
|
372
|
-
return false
|
|
373
|
-
} finally {
|
|
374
|
-
await Fylo.queueRedis.releaseDocLock(job.collection, job.docId, job.jobId)
|
|
375
|
-
}
|
|
376
|
-
}
|
|
377
|
-
|
|
378
|
-
/**
|
|
379
|
-
* Imports data from a URL into a collection.
|
|
380
|
-
* @param collection The name of the collection.
|
|
381
|
-
* @param url The URL of the data to import.
|
|
382
|
-
* @param limit The maximum number of documents to import.
|
|
383
|
-
*/
|
|
384
|
-
async importBulkData<T extends Record<string, any>>(
|
|
385
|
-
collection: string,
|
|
386
|
-
url: URL,
|
|
387
|
-
limit?: number
|
|
388
|
-
) {
|
|
389
|
-
const res = await fetch(url)
|
|
390
|
-
|
|
391
|
-
if (!res.headers.get('content-type')?.includes('application/json'))
|
|
392
|
-
throw new Error('Response is not JSON')
|
|
393
|
-
|
|
394
|
-
let count = 0
|
|
395
|
-
let batchNum = 0
|
|
396
|
-
|
|
397
|
-
const flush = async (batch: T[]) => {
|
|
398
|
-
if (!batch.length) return
|
|
399
|
-
|
|
400
|
-
const items =
|
|
401
|
-
limit && count + batch.length > limit ? batch.slice(0, limit - count) : batch
|
|
402
|
-
|
|
403
|
-
batchNum++
|
|
404
|
-
|
|
405
|
-
const start = Date.now()
|
|
406
|
-
await this.batchPutData(collection, items)
|
|
407
|
-
count += items.length
|
|
408
|
-
|
|
409
|
-
if (count % 10000 === 0) console.log('Count:', count)
|
|
410
|
-
|
|
411
|
-
if (Fylo.LOGGING) {
|
|
412
|
-
const bytes = JSON.stringify(items).length
|
|
413
|
-
const elapsed = Date.now() - start
|
|
414
|
-
const bytesPerSec = (bytes / (elapsed / 1000)).toFixed(2)
|
|
415
|
-
console.log(
|
|
416
|
-
`Batch ${batchNum} of ${bytes} bytes took ${elapsed === Infinity ? 'Infinity' : elapsed}ms (${bytesPerSec} bytes/sec)`
|
|
417
|
-
)
|
|
418
|
-
}
|
|
419
|
-
}
|
|
420
|
-
|
|
421
|
-
// Detect format from the first byte of the body:
|
|
422
|
-
// 0x5b ('[') → JSON array: buffer the full body, then parse and process in slices.
|
|
423
|
-
// Otherwise → NDJSON stream: parse incrementally with Bun.JSONL.parseChunk, which
|
|
424
|
-
// accepts Uint8Array directly (zero-copy for ASCII) and tracks the split-line
|
|
425
|
-
// remainder internally via the returned `read` offset — no manual incomplete-
|
|
426
|
-
// line state machine needed.
|
|
427
|
-
let isJsonArray: boolean | null = null
|
|
428
|
-
const jsonArrayChunks: Uint8Array[] = []
|
|
429
|
-
let jsonArrayLength = 0
|
|
430
|
-
|
|
431
|
-
let pending = new Uint8Array(0)
|
|
432
|
-
let batch: T[] = []
|
|
433
|
-
|
|
434
|
-
for await (const chunk of res.body as unknown as AsyncIterable<Uint8Array>) {
|
|
435
|
-
if (isJsonArray === null) isJsonArray = chunk[0] === 0x5b
|
|
436
|
-
|
|
437
|
-
if (isJsonArray) {
|
|
438
|
-
jsonArrayChunks.push(chunk)
|
|
439
|
-
jsonArrayLength += chunk.length
|
|
440
|
-
continue
|
|
441
|
-
}
|
|
442
|
-
|
|
443
|
-
// Prepend any leftover bytes from the previous iteration (an unterminated line),
|
|
444
|
-
// then parse. `read` points past the last complete line; `pending` holds the rest.
|
|
445
|
-
const merged = new Uint8Array(pending.length + chunk.length)
|
|
446
|
-
merged.set(pending)
|
|
447
|
-
merged.set(chunk, pending.length)
|
|
448
|
-
|
|
449
|
-
const { values, read } = Bun.JSONL.parseChunk(merged)
|
|
450
|
-
pending = merged.subarray(read)
|
|
451
|
-
|
|
452
|
-
for (const item of values) {
|
|
453
|
-
batch.push(item as T)
|
|
454
|
-
if (batch.length === Fylo.MAX_CPUS) {
|
|
455
|
-
await flush(batch)
|
|
456
|
-
batch = []
|
|
457
|
-
if (limit && count >= limit) return count
|
|
458
|
-
}
|
|
459
|
-
}
|
|
460
|
-
}
|
|
461
|
-
|
|
462
|
-
if (isJsonArray) {
|
|
463
|
-
// Reassemble buffered chunks into a single Uint8Array and parse as JSON.
|
|
464
|
-
const body = new Uint8Array(jsonArrayLength)
|
|
465
|
-
let offset = 0
|
|
466
|
-
for (const c of jsonArrayChunks) {
|
|
467
|
-
body.set(c, offset)
|
|
468
|
-
offset += c.length
|
|
469
|
-
}
|
|
470
|
-
|
|
471
|
-
const data = JSON.parse(new TextDecoder().decode(body))
|
|
472
|
-
const items: T[] = Array.isArray(data) ? data : [data]
|
|
473
|
-
|
|
474
|
-
for (let i = 0; i < items.length; i += Fylo.MAX_CPUS) {
|
|
475
|
-
if (limit && count >= limit) break
|
|
476
|
-
await flush(items.slice(i, i + Fylo.MAX_CPUS))
|
|
477
|
-
}
|
|
478
|
-
} else {
|
|
479
|
-
// Flush the in-progress batch and any final line that had no trailing newline.
|
|
480
|
-
if (pending.length > 0) {
|
|
481
|
-
const { values } = Bun.JSONL.parseChunk(pending)
|
|
482
|
-
for (const item of values) batch.push(item as T)
|
|
483
|
-
}
|
|
484
|
-
|
|
485
|
-
if (batch.length > 0) await flush(batch)
|
|
486
|
-
}
|
|
487
|
-
|
|
488
|
-
return count
|
|
489
|
-
}
|
|
490
|
-
|
|
491
|
-
/**
|
|
492
|
-
* Exports data from a collection to a URL.
|
|
493
|
-
* @param collection The name of the collection.
|
|
494
|
-
* @returns The current data exported from the collection.
|
|
495
|
-
*/
|
|
496
|
-
static async *exportBulkData<T extends Record<string, any>>(collection: string) {
|
|
497
|
-
if (Fylo.defaultEngineKind() === 's3-files') {
|
|
498
|
-
yield* Fylo.defaultS3Files.exportBulkData<T>(collection)
|
|
499
|
-
return
|
|
500
|
-
}
|
|
501
|
-
|
|
502
|
-
// Kick off the first S3 list immediately so there is no idle time at the start.
|
|
503
|
-
let listPromise: Promise<Bun.S3ListObjectsResponse> | null = S3.list(collection, {
|
|
504
|
-
delimiter: '/'
|
|
505
|
-
})
|
|
506
|
-
|
|
507
|
-
while (listPromise !== null) {
|
|
508
|
-
const data: Bun.S3ListObjectsResponse = await listPromise
|
|
509
|
-
|
|
510
|
-
if (!data.commonPrefixes?.length) break
|
|
511
|
-
|
|
512
|
-
const ids = data.commonPrefixes
|
|
513
|
-
.map((item) => item.prefix!.split('/')[0]!)
|
|
514
|
-
.filter((key) => TTID.isTTID(key)) as _ttid[]
|
|
515
|
-
|
|
516
|
-
// Start fetching the next page immediately — before awaiting doc reads —
|
|
517
|
-
// so the S3 list round-trip overlaps with document reconstruction.
|
|
518
|
-
listPromise =
|
|
519
|
-
data.isTruncated && data.nextContinuationToken
|
|
520
|
-
? S3.list(collection, {
|
|
521
|
-
delimiter: '/',
|
|
522
|
-
continuationToken: data.nextContinuationToken
|
|
523
|
-
})
|
|
524
|
-
: null
|
|
525
|
-
|
|
526
|
-
const results = await Promise.allSettled(
|
|
527
|
-
ids.map((id) => this.getDoc<T>(collection, id).once())
|
|
528
|
-
)
|
|
529
|
-
|
|
530
|
-
for (const result of results) {
|
|
531
|
-
if (result.status === 'fulfilled') {
|
|
532
|
-
for (const id in result.value) yield result.value[id]
|
|
533
|
-
}
|
|
534
|
-
}
|
|
535
|
-
}
|
|
536
|
-
}
|
|
537
|
-
|
|
538
|
-
/**
|
|
539
|
-
* Gets a document from a collection.
|
|
540
|
-
* @param collection The name of the collection.
|
|
541
|
-
* @param _id The ID of the document.
|
|
542
|
-
* @param onlyId Whether to only return the ID of the document.
|
|
543
|
-
* @returns The document or the ID of the document.
|
|
544
|
-
*/
|
|
545
|
-
static getDoc<T extends Record<string, any>>(
|
|
546
|
-
collection: string,
|
|
547
|
-
_id: _ttid,
|
|
548
|
-
onlyId: boolean = false
|
|
549
|
-
) {
|
|
550
|
-
if (Fylo.defaultEngineKind() === 's3-files') {
|
|
551
|
-
return Fylo.defaultS3Files.getDoc<T>(collection, _id, onlyId)
|
|
552
|
-
}
|
|
553
|
-
|
|
554
|
-
return {
|
|
555
|
-
/**
|
|
556
|
-
* Async iterator (listener) for the document.
|
|
557
|
-
*/
|
|
558
|
-
async *[Symbol.asyncIterator]() {
|
|
559
|
-
const doc = await this.once()
|
|
560
|
-
|
|
561
|
-
if (Object.keys(doc).length > 0) yield doc
|
|
562
|
-
|
|
563
|
-
let finished = false
|
|
564
|
-
|
|
565
|
-
const iter = Dir.searchDocs<T>(
|
|
566
|
-
collection,
|
|
567
|
-
`**/${_id.split('-')[0]}*`,
|
|
568
|
-
{},
|
|
569
|
-
{ listen: true, skip: true }
|
|
570
|
-
)
|
|
571
|
-
|
|
572
|
-
do {
|
|
573
|
-
const { value, done } = await iter.next({ count: 0 })
|
|
574
|
-
|
|
575
|
-
if (value === undefined && !done) continue
|
|
576
|
-
|
|
577
|
-
if (done) {
|
|
578
|
-
finished = true
|
|
579
|
-
break
|
|
580
|
-
}
|
|
581
|
-
|
|
582
|
-
const doc = value as Record<_ttid, T>
|
|
583
|
-
|
|
584
|
-
const keys = Object.keys(doc)
|
|
585
|
-
|
|
586
|
-
if (onlyId && keys.length > 0) {
|
|
587
|
-
yield keys.shift()!
|
|
588
|
-
continue
|
|
589
|
-
} else if (keys.length > 0) {
|
|
590
|
-
yield doc
|
|
591
|
-
continue
|
|
592
|
-
}
|
|
593
|
-
} while (!finished)
|
|
594
|
-
},
|
|
595
|
-
|
|
596
|
-
/**
|
|
597
|
-
* Gets the document once.
|
|
598
|
-
*/
|
|
599
|
-
async once() {
|
|
600
|
-
const items = await Walker.getDocData(collection, _id)
|
|
601
|
-
|
|
602
|
-
if (items.length === 0) return {}
|
|
603
|
-
|
|
604
|
-
const data = await Dir.reconstructData(collection, items)
|
|
605
|
-
|
|
606
|
-
return { [_id]: data } as Record<_ttid, T>
|
|
607
|
-
},
|
|
608
|
-
|
|
609
|
-
/**
|
|
610
|
-
* Async iterator (listener) for the document's deletion.
|
|
611
|
-
*/
|
|
612
|
-
async *onDelete() {
|
|
613
|
-
let finished = false
|
|
614
|
-
|
|
615
|
-
const iter = Dir.searchDocs<T>(
|
|
616
|
-
collection,
|
|
617
|
-
`**/${_id.split('-')[0]}*`,
|
|
618
|
-
{},
|
|
619
|
-
{ listen: true, skip: true },
|
|
620
|
-
true
|
|
621
|
-
)
|
|
622
|
-
|
|
623
|
-
do {
|
|
624
|
-
const { value, done } = await iter.next({ count: 0 })
|
|
625
|
-
|
|
626
|
-
if (value === undefined && !done) continue
|
|
627
|
-
|
|
628
|
-
if (done) {
|
|
629
|
-
finished = true
|
|
630
|
-
break
|
|
631
|
-
}
|
|
632
|
-
|
|
633
|
-
yield value as _ttid
|
|
634
|
-
} while (!finished)
|
|
635
|
-
}
|
|
636
|
-
}
|
|
637
|
-
}
|
|
638
|
-
|
|
639
|
-
/**
|
|
640
|
-
* Puts multiple documents into a collection.
|
|
641
|
-
* @param collection The name of the collection.
|
|
642
|
-
* @param batch The documents to put.
|
|
643
|
-
* @returns The IDs of the documents.
|
|
644
|
-
*/
|
|
645
|
-
async batchPutData<T extends Record<string, any>>(collection: string, batch: Array<T>) {
|
|
646
|
-
const batches: Array<Array<T>> = []
|
|
647
|
-
const ids: _ttid[] = []
|
|
648
|
-
|
|
649
|
-
if (batch.length > navigator.hardwareConcurrency) {
|
|
650
|
-
for (let i = 0; i < batch.length; i += navigator.hardwareConcurrency) {
|
|
651
|
-
batches.push(batch.slice(i, i + navigator.hardwareConcurrency))
|
|
652
|
-
}
|
|
653
|
-
} else batches.push(batch)
|
|
654
|
-
|
|
655
|
-
for (const batch of batches) {
|
|
656
|
-
const res = await Promise.allSettled(
|
|
657
|
-
batch.map((data) => this.putData(collection, data))
|
|
658
|
-
)
|
|
659
|
-
|
|
660
|
-
for (const _id of res
|
|
661
|
-
.filter((item) => item.status === 'fulfilled')
|
|
662
|
-
.map((item) => item.value)) {
|
|
663
|
-
ids.push(_id)
|
|
664
|
-
}
|
|
665
|
-
}
|
|
666
|
-
|
|
667
|
-
return ids
|
|
668
|
-
}
|
|
669
|
-
|
|
670
|
-
async queuePutData<T extends Record<string, any>>(
|
|
671
|
-
collection: string,
|
|
672
|
-
data: Record<_ttid, T> | T
|
|
673
|
-
): Promise<QueuedWriteResult> {
|
|
674
|
-
if (this.engineKind === 's3-files')
|
|
675
|
-
throw new Error('queuePutData is not supported in s3-files engine')
|
|
676
|
-
|
|
677
|
-
const { _id, doc } = await this.prepareInsert(collection, data)
|
|
678
|
-
const job = WriteQueue.createInsertJob(collection, _id, doc)
|
|
679
|
-
|
|
680
|
-
await Fylo.queueRedis.enqueueWrite(job)
|
|
681
|
-
|
|
682
|
-
return {
|
|
683
|
-
jobId: job.jobId,
|
|
684
|
-
docId: _id,
|
|
685
|
-
status: 'queued'
|
|
686
|
-
}
|
|
687
|
-
}
|
|
688
|
-
|
|
689
|
-
async queuePatchDoc<T extends Record<string, any>>(
|
|
690
|
-
collection: string,
|
|
691
|
-
newDoc: Record<_ttid, Partial<T>>,
|
|
692
|
-
oldDoc: Record<_ttid, T> = {}
|
|
693
|
-
): Promise<QueuedWriteResult> {
|
|
694
|
-
if (this.engineKind === 's3-files')
|
|
695
|
-
throw new Error('queuePatchDoc is not supported in s3-files engine')
|
|
696
|
-
|
|
697
|
-
const docId = Object.keys(newDoc).shift() as _ttid
|
|
698
|
-
|
|
699
|
-
if (!docId) throw new Error('this document does not contain an TTID')
|
|
700
|
-
|
|
701
|
-
const job = WriteQueue.createUpdateJob(collection, docId, { newDoc, oldDoc })
|
|
702
|
-
|
|
703
|
-
await Fylo.queueRedis.enqueueWrite(job)
|
|
704
|
-
|
|
705
|
-
return {
|
|
706
|
-
jobId: job.jobId,
|
|
707
|
-
docId,
|
|
708
|
-
status: 'queued'
|
|
709
|
-
}
|
|
710
|
-
}
|
|
711
|
-
|
|
712
|
-
async queueDelDoc(collection: string, _id: _ttid): Promise<QueuedWriteResult> {
|
|
713
|
-
if (this.engineKind === 's3-files')
|
|
714
|
-
throw new Error('queueDelDoc is not supported in s3-files engine')
|
|
715
|
-
|
|
716
|
-
const job = WriteQueue.createDeleteJob(collection, _id)
|
|
717
|
-
|
|
718
|
-
await Fylo.queueRedis.enqueueWrite(job)
|
|
719
|
-
|
|
720
|
-
return {
|
|
721
|
-
jobId: job.jobId,
|
|
722
|
-
docId: _id,
|
|
723
|
-
status: 'queued'
|
|
724
|
-
}
|
|
725
|
-
}
|
|
726
|
-
|
|
727
|
-
/**
|
|
728
|
-
* Puts a document into a collection.
|
|
729
|
-
* @param collection The name of the collection.
|
|
730
|
-
* @param data The document to put.
|
|
731
|
-
* @returns The ID of the document.
|
|
732
|
-
*/
|
|
733
|
-
private static async uniqueTTID(
|
|
734
|
-
existingId?: string,
|
|
735
|
-
claimInRedis: boolean = true
|
|
736
|
-
): Promise<_ttid> {
|
|
737
|
-
// Serialize TTID generation so concurrent callers (e.g. batchPutData)
|
|
738
|
-
// never invoke TTID.generate() at the same sub-millisecond instant.
|
|
739
|
-
let _id!: _ttid
|
|
740
|
-
const prev = Fylo.ttidLock
|
|
741
|
-
Fylo.ttidLock = prev.then(async () => {
|
|
742
|
-
_id = existingId ? TTID.generate(existingId) : TTID.generate()
|
|
743
|
-
// Claim in Redis for cross-process uniqueness (no-op if Redis unavailable)
|
|
744
|
-
if (claimInRedis && !(await Dir.claimTTID(_id)))
|
|
745
|
-
throw new Error('TTID collision — retry')
|
|
746
|
-
})
|
|
747
|
-
await Fylo.ttidLock
|
|
748
|
-
|
|
749
|
-
return _id
|
|
750
|
-
}
|
|
751
|
-
|
|
752
|
-
/**
 * Shared preamble for inserts: loads collection encryption, resolves a
 * fresh unique TTID (derived from the caller-supplied one when present)
 * and unwraps/validates the document body.
 *
 * @param collection The name of the collection.
 * @param data Either a bare document or a single-entry `{ ttid: doc }` record.
 * @returns The resolved `_id` and the (possibly validated) document.
 */
private async prepareInsert<T extends Record<string, any>>(
    collection: string,
    data: Record<_ttid, T> | T
) {
    await Fylo.loadEncryption(collection)

    const firstKey = Object.keys(data)[0]!
    const keyIsTTID = TTID.isTTID(firstKey)
    const claimInRedis = this.engineKind !== 's3-files'

    const _id = keyIsTTID
        ? await Fylo.uniqueTTID(firstKey, claimInRedis)
        : await Fylo.uniqueTTID(undefined, claimInRedis)

    // A TTID first key means the payload is wrapped as { ttid: doc };
    // otherwise `data` itself is the document.
    let doc: T
    if (keyIsTTID) doc = Object.values(data)[0] as T
    else doc = data as T

    if (Fylo.STRICT) doc = (await Gen.validateData(collection, doc)) as T

    return { _id, doc }
}
|
|
770
|
-
|
|
771
|
-
/**
 * Writes a single document directly to the storage engine, bypassing the
 * write queue. Callers (putData / executeQueuedWrite) are responsible for
 * loading encryption and resolving a unique `_id` first.
 *
 * @param collection The name of the collection.
 * @param _id The unique TTID of the document.
 * @param doc The document body to persist.
 * @returns The document's `_id`.
 * @throws If any key write fails (after attempting a rollback).
 */
private async executePutDataDirect<T extends Record<string, any>>(
    collection: string,
    _id: _ttid,
    doc: T
) {
    // s3-files engine persists the document as a single object.
    if (this.engineKind === 's3-files') {
        await this.assertS3FilesEngine().putDocument(collection, _id, doc)
        return _id
    }

    // Directory engines explode the document into data keys plus matching index keys.
    const keys = await Dir.extractKeys(collection, _id, doc)

    // Write all key pairs concurrently; allSettled so every failure is observed.
    const results = await Promise.allSettled(
        keys.data.map((item, i) =>
            this.dir.putKeys(collection, { dataKey: item, indexKey: keys.indexes[i] })
        )
    )

    // Any single failure invalidates the whole write: roll back and surface it.
    if (results.some((res) => res.status === 'rejected')) {
        await this.dir.executeRollback()
        throw new Error(`Unable to write to ${collection} collection`)
    }

    if (Fylo.LOGGING) console.log(`Finished Writing ${_id}`)

    return _id
}
|
|
798
|
-
|
|
799
|
-
/**
 * Applies a partial update to a single document directly against the
 * storage engine (bypassing the write queue). On directory engines a patch
 * is implemented as delete-old-keys + write-new-keys under a fresh TTID.
 *
 * @param collection The name of the collection.
 * @param newDoc Single-entry record mapping the document's TTID to the partial update.
 * @param oldDoc Optional single-entry record holding the current document; read from storage if omitted.
 * @returns The document's new TTID (or the original `_id` when nothing was found to patch).
 * @throws If `newDoc` has no TTID key, or if the key rewrite fails (after rollback).
 */
private async executePatchDocDirect<T extends Record<string, any>>(
    collection: string,
    newDoc: Record<_ttid, Partial<T>>,
    oldDoc: Record<_ttid, T> = {}
) {
    await Fylo.loadEncryption(collection)

    const _id = Object.keys(newDoc).shift() as _ttid

    let _newId = _id

    if (!_id) throw new Error('this document does not contain an TTID')

    if (this.engineKind === 's3-files') {
        // Prefer the caller-provided snapshot; fall back to reading from the engine.
        let existingDoc = oldDoc[_id]
        if (!existingDoc) {
            const existing = await this.assertS3FilesEngine().getDoc<T>(collection, _id).once()
            existingDoc = existing[_id]
        }
        // Nothing to patch — return the unchanged id.
        if (!existingDoc) return _id

        // Merge the partial update over the existing document.
        const currData = { ...existingDoc, ...newDoc[_id] } as T
        let docToWrite: T = currData
        // New TTID derived from the old one; no Redis claim for s3-files.
        _newId = await Fylo.uniqueTTID(_id, false)
        if (Fylo.STRICT) docToWrite = (await Gen.validateData(collection, currData)) as T
        return await this.assertS3FilesEngine().patchDocument(
            collection,
            _id,
            _newId,
            docToWrite,
            existingDoc
        )
    }

    const dataKeys = await Walker.getDocData(collection, _id)

    // Document has no stored keys — nothing to patch.
    if (dataKeys.length === 0) return _newId

    // No snapshot supplied: rebuild the current document from its keys.
    if (Object.keys(oldDoc).length === 0) {
        const data = await Dir.reconstructData(collection, dataKeys)

        oldDoc = { [_id]: data } as Record<_ttid, T>
    }

    if (Object.keys(oldDoc).length === 0) return _newId

    const currData = { ...oldDoc[_id] }

    // Overlay only the fields present in the partial update.
    for (const field in newDoc[_id]) currData[field] = newDoc[_id][field]!

    // legacy-s3 still claims the new TTID in Redis; other engines skip the claim.
    _newId = await Fylo.uniqueTTID(_id, this.engineKind === 'legacy-s3')

    let docToWrite: T = currData as T

    if (Fylo.STRICT) docToWrite = (await Gen.validateData(collection, currData)) as T

    const newKeys = await Dir.extractKeys(collection, _newId, docToWrite)

    // Delete the old keys and write the new ones concurrently; allSettled so
    // every individual failure is observed before deciding to roll back.
    const [deleteResults, putResults] = await Promise.all([
        Promise.allSettled(dataKeys.map((key) => this.dir.deleteKeys(collection, key))),
        Promise.allSettled(
            newKeys.data.map((item, i) =>
                this.dir.putKeys(collection, { dataKey: item, indexKey: newKeys.indexes[i] })
            )
        )
    ])

    if (
        deleteResults.some((r) => r.status === 'rejected') ||
        putResults.some((r) => r.status === 'rejected')
    ) {
        await this.dir.executeRollback()
        throw new Error(`Unable to update ${collection} collection`)
    }

    if (Fylo.LOGGING) console.log(`Finished Updating ${_id} to ${_newId}`)

    return _newId
}
|
|
878
|
-
|
|
879
|
-
/**
 * Deletes a document directly against the storage engine, bypassing the
 * write queue. On s3-files this is a single engine call; on directory
 * engines every key belonging to the document is removed, with a rollback
 * if any individual delete fails.
 *
 * @param collection The name of the collection.
 * @param _id The TTID of the document to delete.
 * @throws If one or more key deletions fail (after rolling back).
 */
private async executeDelDocDirect(collection: string, _id: _ttid) {
    if (this.engineKind === 's3-files') {
        await this.assertS3FilesEngine().deleteDocument(collection, _id)
        return
    }

    const docKeys = await Walker.getDocData(collection, _id)
    const deletions = docKeys.map((key) => this.dir.deleteKeys(collection, key))
    const outcomes = await Promise.allSettled(deletions)

    const anyFailed = outcomes.some((outcome) => outcome.status === 'rejected')
    if (anyFailed) {
        await this.dir.executeRollback()
        throw new Error(`Unable to delete from ${collection} collection`)
    }

    if (Fylo.LOGGING) console.log(`Finished Deleting ${_id}`)
}
|
|
898
|
-
|
|
899
|
-
/**
 * Puts a document into a collection.
 *
 * On the s3-files engine the write happens synchronously; on queue-backed
 * engines it is enqueued and (by default) awaited via runQueuedJob.
 *
 * @param collection The name of the collection.
 * @param data Either a bare document or a single-entry `{ ttid: doc }` record.
 * @param options `wait: false` returns the queued-job descriptor without waiting.
 * @returns The document's TTID, or a QueuedWriteResult when `wait: false`.
 */
async putData<T extends Record<string, any>>(collection: string, data: T): Promise<_ttid>
async putData<T extends Record<string, any>>(
    collection: string,
    data: Record<_ttid, T>
): Promise<_ttid>
async putData<T extends Record<string, any>>(
    collection: string,
    data: T,
    options: { wait?: true; timeoutMs?: number }
): Promise<_ttid>
async putData<T extends Record<string, any>>(
    collection: string,
    data: Record<_ttid, T>,
    options: { wait?: true; timeoutMs?: number }
): Promise<_ttid>
async putData<T extends Record<string, any>>(
    collection: string,
    data: T,
    options: { wait: false; timeoutMs?: number }
): Promise<QueuedWriteResult>
async putData<T extends Record<string, any>>(
    collection: string,
    data: Record<_ttid, T>,
    options: { wait: false; timeoutMs?: number }
): Promise<QueuedWriteResult>
async putData<T extends Record<string, any>>(
    collection: string,
    data: Record<_ttid, T> | T,
    options: { wait?: boolean; timeoutMs?: number } = {}
): Promise<_ttid | QueuedWriteResult> {
    // s3-files has no write queue: write synchronously, reject wait:false.
    if (this.engineKind === 's3-files') {
        if (options.wait === false)
            throw new Error('wait:false is not supported in s3-files engine')
        const { _id, doc } = await this.prepareInsert(collection, data)
        await this.executePutDataDirect(collection, _id, doc)
        return _id
    }

    const queued = await this.queuePutData(collection, data)

    // For inserts the doc ID is known up front, so the completion callback
    // just returns it.
    return await this.runQueuedJob(queued, options, async () => queued.docId)
}
|
|
941
|
-
|
|
942
|
-
/**
 * Dispatches a dequeued write job to the matching direct-execution path.
 * Intended to be called by the queue worker.
 *
 * @param job The queued write job (insert / update / delete).
 * @returns Whatever the underlying direct operation returns (e.g. a TTID).
 * @throws On an unrecognized operation.
 */
async executeQueuedWrite(job: WriteJob) {
    switch (job.operation) {
        case 'insert':
            // Inserts need encryption loaded here; update/delete paths load
            // it (or don't need it) inside their direct executors.
            await Fylo.loadEncryption(job.collection)
            return await this.executePutDataDirect(job.collection, job.docId, job.payload)
        case 'update':
            return await this.executePatchDocDirect(
                job.collection,
                job.payload.newDoc as Record<_ttid, Partial<Record<string, any>>>,
                job.payload.oldDoc as Record<_ttid, Record<string, any>> | undefined
            )
        case 'delete':
            return await this.executeDelDocDirect(job.collection, job.payload._id as _ttid)
        default:
            throw new Error(`Unsupported queued write operation: ${job.operation}`)
    }
}
|
|
959
|
-
|
|
960
|
-
/**
 * Patches a document in a collection.
 * @param collection The name of the collection.
 * @param newDoc Single-entry record mapping the document's TTID to the partial update.
 * @param oldDoc Optional single-entry record with the current document (skips a read).
 * @param options `wait: false` returns the queued-job descriptor without waiting.
 * @returns The document's new TTID, or a QueuedWriteResult when `wait: false`.
 *          (Previous doc said "number of documents patched" — that was wrong.)
 */
async patchDoc<T extends Record<string, any>>(
    collection: string,
    newDoc: Record<_ttid, Partial<T>>,
    oldDoc?: Record<_ttid, T>
): Promise<_ttid>
async patchDoc<T extends Record<string, any>>(
    collection: string,
    newDoc: Record<_ttid, Partial<T>>,
    oldDoc: Record<_ttid, T> | undefined,
    options: { wait?: true; timeoutMs?: number }
): Promise<_ttid>
async patchDoc<T extends Record<string, any>>(
    collection: string,
    newDoc: Record<_ttid, Partial<T>>,
    oldDoc: Record<_ttid, T> | undefined,
    options: { wait: false; timeoutMs?: number }
): Promise<QueuedWriteResult>
async patchDoc<T extends Record<string, any>>(
    collection: string,
    newDoc: Record<_ttid, Partial<T>>,
    oldDoc: Record<_ttid, T> = {},
    options: { wait?: boolean; timeoutMs?: number } = {}
): Promise<_ttid | QueuedWriteResult> {
    // s3-files has no write queue: patch synchronously, reject wait:false.
    if (this.engineKind === 's3-files') {
        if (options.wait === false)
            throw new Error('wait:false is not supported in s3-files engine')
        const _id = await this.executePatchDocDirect(collection, newDoc, oldDoc)
        return _id
    }
    const queued = await this.queuePatchDoc(collection, newDoc, oldDoc)

    // A patch produces a NEW TTID; prefer the one recorded on the finished
    // job, falling back to the original id.
    return await this.runQueuedJob(queued, options, async () => {
        const job = await this.getJobStatus(queued.jobId)
        return (job?.docId ?? queued.docId) as _ttid
    })
}
|
|
1003
|
-
|
|
1004
|
-
/**
 * Patches documents in a collection.
 * Matches documents via `updateSchema.$where` and applies `updateSchema.$set`
 * to each one through patchDoc.
 * @param collection The name of the collection.
 * @param updateSchema The update schema ($where filter, $set payload).
 * @returns The number of documents patched.
 */
async patchDocs<T extends Record<string, any>>(
    collection: string,
    updateSchema: _storeUpdate<T>
) {
    await Fylo.loadEncryption(collection)

    // Patches the first (and only expected) entry of a { ttid: doc } record;
    // returns undefined for an empty record.
    const processDoc = (doc: Record<_ttid, T>, updateSchema: _storeUpdate<T>) => {
        for (const _id in doc)
            return this.patchDoc(collection, { [_id]: updateSchema.$set }, doc)

        return
    }

    let count = 0

    const promises: Promise<_ttid>[] = []

    // s3-files engine: stream matches from the engine's finder.
    if (this.engineKind === 's3-files') {
        for await (const value of this.findDocs<T>(collection, updateSchema.$where).collect()) {
            if (typeof value === 'object' && value !== null && !Array.isArray(value)) {
                const promise = processDoc(value as Record<_ttid, T>, updateSchema)
                if (promise) {
                    promises.push(promise)
                    count++
                }
            }
        }

        await Promise.all(promises)
        return count
    }

    let finished = false

    const exprs = await Query.getExprs(collection, updateSchema.$where ?? {})

    // A single match-everything glob means no usable index expression:
    // fall back to scanning all documents.
    if (exprs.length === 1 && exprs[0] === `**/*`) {
        for (const doc of await Fylo.allDocs<T>(collection, updateSchema.$where)) {
            const promise = processDoc(doc, updateSchema)

            if (promise) {
                promises.push(promise)
                count++
            }
        }
    } else {
        const iter = Dir.searchDocs<T>(
            collection,
            exprs,
            {
                updated: updateSchema?.$where?.$updated,
                created: updateSchema?.$where?.$created
            },
            { listen: false, skip: false }
        )

        // Manual iterator protocol: the running count is fed back into the
        // iterator on each step (e.g. for limit handling inside searchDocs).
        do {
            const { value, done } = await iter.next({ count })

            if (value === undefined && !done) continue

            if (done) {
                finished = true
                break
            }

            const promise = processDoc(value as Record<_ttid, T>, updateSchema)

            if (promise) {
                promises.push(promise)
                count++
            }
        } while (!finished)
    }

    await Promise.all(promises)

    return count
}
|
|
1089
|
-
|
|
1090
|
-
/**
 * Deletes a document from a collection.
 * @param collection The name of the collection.
 * @param _id The ID of the document.
 * @param options `wait: false` returns the queued-job descriptor without waiting.
 * @returns Resolves with no value once deleted, or a QueuedWriteResult when
 *          `wait: false`. (Previous doc said "number of documents deleted" —
 *          that was wrong.)
 */
async delDoc(collection: string, _id: _ttid): Promise<void>
async delDoc(
    collection: string,
    _id: _ttid,
    options: { wait?: true; timeoutMs?: number }
): Promise<void>
async delDoc(
    collection: string,
    _id: _ttid,
    options: { wait: false; timeoutMs?: number }
): Promise<QueuedWriteResult>
async delDoc(
    collection: string,
    _id: _ttid,
    options: { wait?: boolean; timeoutMs?: number } = {}
): Promise<void | QueuedWriteResult> {
    // s3-files has no write queue: delete synchronously, reject wait:false.
    if (this.engineKind === 's3-files') {
        if (options.wait === false)
            throw new Error('wait:false is not supported in s3-files engine')
        await this.executeDelDocDirect(collection, _id)
        return
    }
    const queued = await this.queueDelDoc(collection, _id)

    await this.runQueuedJob(queued, options, async () => undefined)
}
|
|
1122
|
-
|
|
1123
|
-
/**
 * Deletes documents from a collection.
 * Matches documents via `deleteSchema` and deletes each through delDoc.
 * @param collection The name of the collection.
 * @param deleteSchema The delete schema (filter); omitted means all documents.
 * @returns The number of documents deleted.
 */
async delDocs<T extends Record<string, any>>(
    collection: string,
    deleteSchema?: _storeDelete<T>
) {
    await Fylo.loadEncryption(collection)

    // Deletes the first TTID-keyed entry of a { ttid: doc } record;
    // returns undefined when the record has no valid TTID key.
    const processDoc = (doc: Record<_ttid, T>) => {
        for (const _id in doc) {
            if (TTID.isTTID(_id)) {
                return this.delDoc(collection, _id)
            }
        }

        return
    }

    let count = 0

    const promises: Promise<void>[] = []

    // s3-files engine: stream matches from the engine's finder.
    if (this.engineKind === 's3-files') {
        for await (const value of this.findDocs<T>(collection, deleteSchema).collect()) {
            if (typeof value === 'object' && value !== null && !Array.isArray(value)) {
                const promise = processDoc(value as Record<_ttid, T>)
                if (promise) {
                    promises.push(promise)
                    count++
                }
            }
        }

        await Promise.all(promises)
        return count
    }

    let finished = false

    const exprs = await Query.getExprs(collection, deleteSchema ?? {})

    // Match-everything glob: no index expression available, scan all docs.
    if (exprs.length === 1 && exprs[0] === `**/*`) {
        for (const doc of await Fylo.allDocs<T>(collection, deleteSchema)) {
            const promise = processDoc(doc)

            if (promise) {
                promises.push(promise)
                count++
            }
        }
    } else {
        const iter = Dir.searchDocs<T>(
            collection,
            exprs,
            { updated: deleteSchema?.$updated, created: deleteSchema?.$created },
            { listen: false, skip: false }
        )

        // Manual iterator protocol: feed the running count back into the iterator.
        do {
            const { value, done } = await iter.next({ count })

            if (value === undefined && !done) continue

            if (done) {
                finished = true
                break
            }

            const promise = processDoc(value as Record<_ttid, T>)

            if (promise) {
                promises.push(promise)
                count++
            }
        } while (!finished)
    }

    await Promise.all(promises)

    return count
}
|
|
1208
|
-
|
|
1209
|
-
/**
 * Mutates `data` in place, removing every field that is not listed in
 * `selection`, then returns the same object.
 *
 * @param selection The field names to keep.
 * @param data The document to trim (mutated).
 * @returns The same `data` object with unselected fields removed.
 */
private static selectValues<T extends Record<string, any>>(selection: Array<keyof T>, data: T) {
    const wanted = new Set(selection)

    for (const key of Object.keys(data)) {
        if (!wanted.has(key as keyof T)) delete data[key]
    }

    return data
}
|
|
1216
|
-
|
|
1217
|
-
/**
 * Mutates `data` in place, moving each field that has an entry in `rename`
 * to its mapped name, then returns the same object. Fields without a
 * mapping (or with a falsy target name) are left untouched.
 *
 * @param rename Map from current field name to new field name.
 * @param data The document to rewrite (mutated).
 * @returns The same `data` object with fields renamed.
 */
private static renameFields<T extends Record<string, any>>(
    rename: Record<keyof T, string>,
    data: T
) {
    for (const field of Object.keys(data)) {
        const target = rename[field as keyof T]
        if (!target) continue

        data[target] = data[field]
        delete data[field]
    }

    return data
}
|
|
1230
|
-
|
|
1231
|
-
/**
 * Joins documents from two collections.
 *
 * Field values are matched by scanning both collections' index keys
 * (`field/value/ttid`) and comparing the extracted value segments per the
 * operators in `join.$on`. The `$mode` controls what is materialized for
 * each matched pair (inner / left / right / outer).
 *
 * @param join The join schema ($leftCollection, $rightCollection, $on, $mode, …).
 * @returns The joined documents, optionally grouped ($groupby) and/or
 *          reduced to IDs ($onlyIds).
 */
static async joinDocs<T extends Record<string, any>, U extends Record<string, any>>(
    join: _join<T, U>
) {
    if (Fylo.defaultEngineKind() === 's3-files') {
        return await Fylo.defaultS3Files.joinDocs(join)
    }

    // Keyed by "leftTTID, rightTTID" — one entry per matched pair.
    const docs: Record<`${_ttid}, ${_ttid}`, T | U | (T & U) | (Partial<T> & Partial<U>)> = {}

    const compareFields = async (
        leftField: keyof T,
        rightField: keyof U,
        compare: (leftVal: string, rightVal: string) => boolean
    ) => {
        if (join.$leftCollection === join.$rightCollection)
            throw new Error('Left and right collections cannot be the same')

        let leftToken: string | undefined
        const leftFieldIndexes: string[] = []

        // NOTE(review): nextContinuationToken is read but never passed back
        // into S3.list — if a listing is truncated this loop re-fetches the
        // same page indefinitely. Verify S3.list's pagination contract.
        do {
            const leftData = await S3.list(join.$leftCollection, {
                prefix: String(leftField)
            })

            if (!leftData.contents) break

            leftFieldIndexes.push(...leftData.contents!.map((content) => content.key!))

            leftToken = leftData.nextContinuationToken
        } while (leftToken !== undefined)

        let rightToken: string | undefined
        const rightFieldIndexes: string[] = []

        // Same pagination caveat as the left-side loop above.
        do {
            const rightData = await S3.list(join.$rightCollection, {
                prefix: String(rightField)
            })

            if (!rightData.contents) break

            rightFieldIndexes.push(...rightData.contents!.map((content) => content.key!))

            rightToken = rightData.nextContinuationToken
        } while (rightToken !== undefined)

        for (const leftIdx of leftFieldIndexes) {
            // Index key layout: .../<value>/<ttid> — pop ttid, then value.
            const leftSegs = leftIdx.split('/')
            const left_id = leftSegs.pop()! as _ttid
            const leftVal = leftSegs.pop()!

            const leftCollection = join.$leftCollection

            // De-duplicates right-side values matched against this left row.
            const allVals = new Set<string>()

            for (const rightIdx of rightFieldIndexes) {
                const rightSegs = rightIdx.split('/')
                const right_id = rightSegs.pop()! as _ttid
                const rightVal = rightSegs.pop()!

                const rightCollection = join.$rightCollection

                // NOTE(review): compare is declared (leftVal, rightVal) but
                // invoked with the arguments swapped — harmless for $eq/$ne,
                // but it inverts the asymmetric operators ($gt/$lt/$gte/$lte).
                // Confirm the intended operand order.
                if (compare(rightVal, leftVal) && !allVals.has(rightVal)) {
                    allVals.add(rightVal)

                    switch (join.$mode) {
                        case 'inner':
                            // Inner: only the compared field values, no doc fetch.
                            docs[`${left_id}, ${right_id}`] = {
                                [leftField]: Dir.parseValue(leftVal),
                                [rightField]: Dir.parseValue(rightVal)
                            } as Partial<T> & Partial<U>
                            break
                        case 'left':
                            // Left: fetch and keep the left document only.
                            const leftDoc = await this.getDoc<T>(leftCollection, left_id).once()
                            if (Object.keys(leftDoc).length > 0) {
                                let leftData = leftDoc[left_id]
                                if (join.$select)
                                    leftData = this.selectValues<T>(
                                        join.$select as Array<keyof T>,
                                        leftData
                                    )
                                if (join.$rename)
                                    leftData = this.renameFields<T>(join.$rename, leftData)
                                docs[`${left_id}, ${right_id}`] = leftData as T
                            }
                            break
                        case 'right':
                            // Right: fetch and keep the right document only.
                            const rightDoc = await this.getDoc<U>(
                                rightCollection,
                                right_id
                            ).once()
                            if (Object.keys(rightDoc).length > 0) {
                                let rightData = rightDoc[right_id]
                                if (join.$select)
                                    rightData = this.selectValues<U>(
                                        join.$select as Array<keyof U>,
                                        rightData
                                    )
                                if (join.$rename)
                                    rightData = this.renameFields<U>(join.$rename, rightData)
                                docs[`${left_id}, ${right_id}`] = rightData as U
                            }
                            break
                        case 'outer':
                            // Outer: fetch both documents and merge them
                            // (right-side fields win on collision).
                            let leftFullData: T = {} as T
                            let rightFullData: U = {} as U

                            const leftFullDoc = await this.getDoc<T>(
                                leftCollection,
                                left_id
                            ).once()

                            if (Object.keys(leftFullDoc).length > 0) {
                                let leftData = leftFullDoc[left_id]
                                if (join.$select)
                                    leftData = this.selectValues<T>(
                                        join.$select as Array<keyof T>,
                                        leftData
                                    )
                                if (join.$rename)
                                    leftData = this.renameFields<T>(join.$rename, leftData)
                                leftFullData = { ...leftData, ...leftFullData } as T
                            }

                            const rightFullDoc = await this.getDoc<U>(
                                rightCollection,
                                right_id
                            ).once()

                            if (Object.keys(rightFullDoc).length > 0) {
                                let rightData = rightFullDoc[right_id]
                                if (join.$select)
                                    rightData = this.selectValues<U>(
                                        join.$select as Array<keyof U>,
                                        rightData
                                    )
                                if (join.$rename)
                                    rightData = this.renameFields<U>(join.$rename, rightData)
                                rightFullData = { ...rightData, ...rightFullData } as U
                            }

                            docs[`${left_id}, ${right_id}`] = {
                                ...leftFullData,
                                ...rightFullData
                            } as T & U
                            break
                    }

                    // Stop the inner scan once the result limit is reached.
                    if (join.$limit && Object.keys(docs).length === join.$limit) break
                }
            }

            // Stop the outer scan once the result limit is reached.
            if (join.$limit && Object.keys(docs).length === join.$limit) break
        }
    }

    // Run one comparison pass per operator present on each $on field.
    for (const field in join.$on) {
        if (join.$on[field]!.$eq)
            await compareFields(
                field,
                join.$on[field]!.$eq,
                (leftVal, rightVal) => leftVal === rightVal
            )

        if (join.$on[field]!.$ne)
            await compareFields(
                field,
                join.$on[field]!.$ne,
                (leftVal, rightVal) => leftVal !== rightVal
            )

        if (join.$on[field]!.$gt)
            await compareFields(
                field,
                join.$on[field]!.$gt,
                (leftVal, rightVal) => Number(leftVal) > Number(rightVal)
            )

        if (join.$on[field]!.$lt)
            await compareFields(
                field,
                join.$on[field]!.$lt,
                (leftVal, rightVal) => Number(leftVal) < Number(rightVal)
            )

        if (join.$on[field]!.$gte)
            await compareFields(
                field,
                join.$on[field]!.$gte,
                (leftVal, rightVal) => Number(leftVal) >= Number(rightVal)
            )

        if (join.$on[field]!.$lte)
            await compareFields(
                field,
                join.$on[field]!.$lte,
                (leftVal, rightVal) => Number(leftVal) <= Number(rightVal)
            )
    }

    // Optional grouping of the joined pairs by a field value.
    if (join.$groupby) {
        const groupedDocs: Record<string, Record<string, Partial<T | U>>> = {} as Record<
            string,
            Record<string, Partial<T | U>>
        >

        for (const ids in docs) {
            const data = docs[ids as `${_ttid}, ${_ttid}`]

            // @ts-expect-error - Object.groupBy not yet in TS lib types
            const grouping = Object.groupBy([data], (elem) => elem[join.$groupby!])

            for (const group in grouping) {
                if (groupedDocs[group]) groupedDocs[group][ids] = data
                else groupedDocs[group] = { [ids]: data }
            }
        }

        if (join.$onlyIds) {
            const groupedIds: Record<string, _ttid[]> = {}

            for (const group in groupedDocs) {
                const doc = groupedDocs[group]
                groupedIds[group] = Object.keys(doc).flat()
            }

            return groupedIds
        }

        return groupedDocs
    }

    if (join.$onlyIds) return Array.from(new Set(Object.keys(docs).flat()))

    return docs
}
|
|
1473
|
-
|
|
1474
|
-
/**
 * Loads all (or up to `query.$limit`) documents of a collection as
 * single-entry `{ ttid: doc }` records. Unreadable or empty documents are
 * silently skipped.
 *
 * @param collection The name of the collection.
 * @param query Optional query; only `$limit` influences the listing here.
 * @returns An array of `{ ttid: doc }` records.
 */
private static async allDocs<T extends Record<string, any>>(
    collection: string,
    query?: _storeQuery<T>
) {
    // s3-files engine: collect from the engine's finder stream.
    if (Fylo.defaultEngineKind() === 's3-files') {
        const results: Array<Record<_ttid, T>> = []
        for await (const data of Fylo.defaultS3Files.findDocs<T>(collection, query).collect()) {
            if (typeof data === 'object' && !Array.isArray(data))
                results.push(data as Record<_ttid, T>)
        }
        return results
    }

    // NOTE(review): a single list call — documents beyond the first page
    // are not fetched; confirm whether pagination is needed here.
    const res = await S3.list(collection, {
        delimiter: '/',
        maxKeys: !query || !query.$limit ? undefined : query.$limit
    })

    // Top-level prefixes whose first path segment is a TTID are documents.
    const ids =
        (res.commonPrefixes
            ?.map((item) => item.prefix!.split('/')[0]!)
            .filter((key) => TTID.isTTID(key)) as _ttid[]) ?? ([] as _ttid[])

    // Fetch all documents concurrently; failures are dropped below.
    const docs = await Promise.allSettled(
        ids.map((id) => Fylo.getDoc<T>(collection, id).once())
    )

    return docs
        .filter((item) => item.status === 'fulfilled')
        .map((item) => item.value)
        .filter((doc) => Object.keys(doc).length > 0)
}
|
|
1506
|
-
|
|
1507
|
-
/**
|
|
1508
|
-
* Finds documents in a collection.
|
|
1509
|
-
* @param collection The name of the collection.
|
|
1510
|
-
* @param query The query schema.
|
|
1511
|
-
* @returns The found documents.
|
|
1512
|
-
*/
|
|
1513
|
-
static findDocs<T extends Record<string, any>>(collection: string, query?: _storeQuery<T>) {
|
|
1514
|
-
if (Fylo.defaultEngineKind() === 's3-files') {
|
|
1515
|
-
return Fylo.defaultS3Files.findDocs<T>(collection, query)
|
|
1516
|
-
}
|
|
1517
|
-
|
|
1518
|
-
const processDoc = (doc: Record<_ttid, T>, query?: _storeQuery<T>) => {
|
|
1519
|
-
if (Object.keys(doc).length > 0) {
|
|
1520
|
-
// Post-filter for operators that cannot be expressed as globs ($ne, $gt, $gte, $lt, $lte).
|
|
1521
|
-
// $ops use OR semantics: a document passes if it matches at least one op.
|
|
1522
|
-
if (query?.$ops) {
|
|
1523
|
-
for (const [_id, data] of Object.entries(doc)) {
|
|
1524
|
-
let matchesAny = false
|
|
1525
|
-
for (const op of query.$ops) {
|
|
1526
|
-
let opMatches = true
|
|
1527
|
-
for (const col in op) {
|
|
1528
|
-
const val = (data as Record<string, unknown>)[col]
|
|
1529
|
-
const cond = op[col as keyof T]!
|
|
1530
|
-
if (cond.$ne !== undefined && val == cond.$ne) {
|
|
1531
|
-
opMatches = false
|
|
1532
|
-
break
|
|
1533
|
-
}
|
|
1534
|
-
if (cond.$gt !== undefined && !(Number(val) > cond.$gt)) {
|
|
1535
|
-
opMatches = false
|
|
1536
|
-
break
|
|
1537
|
-
}
|
|
1538
|
-
if (cond.$gte !== undefined && !(Number(val) >= cond.$gte)) {
|
|
1539
|
-
opMatches = false
|
|
1540
|
-
break
|
|
1541
|
-
}
|
|
1542
|
-
if (cond.$lt !== undefined && !(Number(val) < cond.$lt)) {
|
|
1543
|
-
opMatches = false
|
|
1544
|
-
break
|
|
1545
|
-
}
|
|
1546
|
-
if (cond.$lte !== undefined && !(Number(val) <= cond.$lte)) {
|
|
1547
|
-
opMatches = false
|
|
1548
|
-
break
|
|
1549
|
-
}
|
|
1550
|
-
}
|
|
1551
|
-
if (opMatches) {
|
|
1552
|
-
matchesAny = true
|
|
1553
|
-
break
|
|
1554
|
-
}
|
|
1555
|
-
}
|
|
1556
|
-
if (!matchesAny) delete doc[_id as _ttid]
|
|
1557
|
-
}
|
|
1558
|
-
if (Object.keys(doc).length === 0) return
|
|
1559
|
-
}
|
|
1560
|
-
|
|
1561
|
-
for (let [_id, data] of Object.entries(doc)) {
|
|
1562
|
-
if (query && query.$select && query.$select.length > 0) {
|
|
1563
|
-
data = this.selectValues<T>(query.$select as Array<keyof T>, data)
|
|
1564
|
-
}
|
|
1565
|
-
|
|
1566
|
-
if (query && query.$rename) data = this.renameFields<T>(query.$rename, data)
|
|
1567
|
-
|
|
1568
|
-
doc[_id] = data
|
|
1569
|
-
}
|
|
1570
|
-
|
|
1571
|
-
if (query && query.$groupby) {
|
|
1572
|
-
const docGroup: Record<string, Record<string, Partial<T>>> = {}
|
|
1573
|
-
|
|
1574
|
-
for (const [id, data] of Object.entries(doc)) {
|
|
1575
|
-
const groupValue = data[query.$groupby] as string
|
|
1576
|
-
|
|
1577
|
-
if (groupValue) {
|
|
1578
|
-
delete data[query.$groupby]
|
|
1579
|
-
|
|
1580
|
-
docGroup[groupValue] = {
|
|
1581
|
-
[id]: data as Partial<T>
|
|
1582
|
-
} as Record<_ttid, Partial<T>>
|
|
1583
|
-
}
|
|
1584
|
-
}
|
|
1585
|
-
|
|
1586
|
-
if (query && query.$onlyIds) {
|
|
1587
|
-
for (const [groupValue, doc] of Object.entries(docGroup)) {
|
|
1588
|
-
for (const id in doc as Record<_ttid, T>) {
|
|
1589
|
-
// @ts-expect-error - dynamic key assignment on grouped object
|
|
1590
|
-
docGroup[groupValue][id] = null
|
|
1591
|
-
}
|
|
1592
|
-
}
|
|
1593
|
-
|
|
1594
|
-
return docGroup
|
|
1595
|
-
}
|
|
1596
|
-
|
|
1597
|
-
return docGroup
|
|
1598
|
-
}
|
|
1599
|
-
|
|
1600
|
-
if (query && query.$onlyIds) {
|
|
1601
|
-
return Object.keys(doc).shift()
|
|
1602
|
-
}
|
|
1603
|
-
|
|
1604
|
-
return doc
|
|
1605
|
-
}
|
|
1606
|
-
|
|
1607
|
-
return
|
|
1608
|
-
}
|
|
1609
|
-
|
|
1610
|
-
return {
|
|
1611
|
-
/**
|
|
1612
|
-
* Async iterator (listener) for the documents.
|
|
1613
|
-
*/
|
|
1614
|
-
async *[Symbol.asyncIterator]() {
|
|
1615
|
-
await Fylo.loadEncryption(collection)
|
|
1616
|
-
|
|
1617
|
-
const expression = await Query.getExprs(collection, query ?? {})
|
|
1618
|
-
|
|
1619
|
-
if (expression.length === 1 && expression[0] === `**/*`) {
|
|
1620
|
-
for (const doc of await Fylo.allDocs<T>(collection, query))
|
|
1621
|
-
yield processDoc(doc, query)
|
|
1622
|
-
}
|
|
1623
|
-
|
|
1624
|
-
let count = 0
|
|
1625
|
-
let finished = false
|
|
1626
|
-
|
|
1627
|
-
const iter = Dir.searchDocs<T>(
|
|
1628
|
-
collection,
|
|
1629
|
-
expression,
|
|
1630
|
-
{ updated: query?.$updated, created: query?.$created },
|
|
1631
|
-
{ listen: true, skip: true }
|
|
1632
|
-
)
|
|
1633
|
-
|
|
1634
|
-
do {
|
|
1635
|
-
const { value, done } = await iter.next({ count, limit: query?.$limit })
|
|
1636
|
-
|
|
1637
|
-
if (value === undefined && !done) continue
|
|
1638
|
-
|
|
1639
|
-
if (done) {
|
|
1640
|
-
finished = true
|
|
1641
|
-
break
|
|
1642
|
-
}
|
|
1643
|
-
|
|
1644
|
-
const result = processDoc(value as Record<_ttid, T>, query)
|
|
1645
|
-
if (result !== undefined) {
|
|
1646
|
-
count++
|
|
1647
|
-
yield result
|
|
1648
|
-
}
|
|
1649
|
-
} while (!finished)
|
|
1650
|
-
},
|
|
1651
|
-
|
|
1652
|
-
/**
|
|
1653
|
-
* Async iterator for the documents.
|
|
1654
|
-
*/
|
|
1655
|
-
async *collect() {
|
|
1656
|
-
await Fylo.loadEncryption(collection)
|
|
1657
|
-
|
|
1658
|
-
const expression = await Query.getExprs(collection, query ?? {})
|
|
1659
|
-
|
|
1660
|
-
if (expression.length === 1 && expression[0] === `**/*`) {
|
|
1661
|
-
for (const doc of await Fylo.allDocs<T>(collection, query))
|
|
1662
|
-
yield processDoc(doc, query)
|
|
1663
|
-
} else {
|
|
1664
|
-
let count = 0
|
|
1665
|
-
let finished = false
|
|
1666
|
-
|
|
1667
|
-
const iter = Dir.searchDocs<T>(
|
|
1668
|
-
collection,
|
|
1669
|
-
expression,
|
|
1670
|
-
{ updated: query?.$updated, created: query?.$created },
|
|
1671
|
-
{ listen: false, skip: false }
|
|
1672
|
-
)
|
|
1673
|
-
|
|
1674
|
-
do {
|
|
1675
|
-
const { value, done } = await iter.next({ count, limit: query?.$limit })
|
|
1676
|
-
|
|
1677
|
-
if (value === undefined && !done) continue
|
|
1678
|
-
|
|
1679
|
-
if (done) {
|
|
1680
|
-
finished = true
|
|
1681
|
-
break
|
|
1682
|
-
}
|
|
1683
|
-
|
|
1684
|
-
const result = processDoc(value as Record<_ttid, T>, query)
|
|
1685
|
-
if (result !== undefined) {
|
|
1686
|
-
count++
|
|
1687
|
-
yield result
|
|
1688
|
-
}
|
|
1689
|
-
} while (!finished)
|
|
1690
|
-
}
|
|
1691
|
-
},
|
|
1692
|
-
|
|
1693
|
-
/**
 * Async iterator (listener) for the document's deletion.
 *
 * Streams the ids of deleted documents matching the query: drives
 * `Dir.searchDocs` in listen mode with the deletion flag set, yielding each
 * truthy value as a document id until the underlying iterator completes.
 */
async *onDelete() {
  await Fylo.loadEncryption(collection)

  // Mirrors the count handle handed to the iterator; deletions are not
  // limited, so it is never advanced.
  const yielded = 0
  const exprs = await Query.getExprs(collection, query ?? {})

  const deletions = Dir.searchDocs<T>(
    collection,
    exprs,
    {},
    { listen: true, skip: true },
    true
  )

  for (;;) {
    const step = await deletions.next({ count: yielded })

    if (step.done) break
    // Listener tick with nothing new yet — keep waiting.
    if (step.value === undefined) continue

    if (step.value) yield step.value as _ttid
  }
}
|
|
1723
|
-
}
|
|
1724
|
-
}
|
|
1725
|
-
}
|
|
1726
|
-
|
|
1727
|
-
export { migrateLegacyS3ToS3Files } from './migrate'
|