@ddd-ts/event-sourcing-firestore 0.0.37 → 0.0.39
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/_virtual/_rolldown/runtime.js +29 -0
- package/dist/firestore.event-lake.aggregate-store.d.ts +35 -0
- package/dist/firestore.event-lake.aggregate-store.d.ts.map +1 -0
- package/dist/firestore.event-lake.aggregate-store.js +38 -0
- package/dist/firestore.event-lake.aggregate-store.mjs +36 -0
- package/dist/firestore.event-lake.aggregate-store.spec.d.ts +2 -0
- package/dist/firestore.event-lake.aggregate-store.spec.d.ts.map +1 -0
- package/dist/firestore.event-lake.storage-layer.d.ts +14 -0
- package/dist/firestore.event-lake.storage-layer.d.ts.map +1 -0
- package/dist/firestore.event-lake.storage-layer.js +67 -0
- package/dist/firestore.event-lake.storage-layer.mjs +65 -0
- package/dist/firestore.event-lake.store.d.ts +6 -0
- package/dist/firestore.event-lake.store.d.ts.map +1 -0
- package/dist/firestore.event-lake.store.js +14 -0
- package/dist/firestore.event-lake.store.mjs +13 -0
- package/dist/firestore.event-lake.store.spec.d.ts +2 -0
- package/dist/firestore.event-lake.store.spec.d.ts.map +1 -0
- package/dist/firestore.event-stream-store.spec.d.ts +2 -0
- package/dist/firestore.event-stream-store.spec.d.ts.map +1 -0
- package/dist/firestore.event-stream.aggregate-store.d.ts +30 -0
- package/dist/firestore.event-stream.aggregate-store.d.ts.map +1 -0
- package/dist/firestore.event-stream.aggregate-store.js +38 -0
- package/dist/firestore.event-stream.aggregate-store.mjs +36 -0
- package/dist/firestore.event-stream.aggregate-store.spec.d.ts +2 -0
- package/dist/firestore.event-stream.aggregate-store.spec.d.ts.map +1 -0
- package/dist/firestore.event-stream.storage-layer.d.ts +15 -0
- package/dist/firestore.event-stream.storage-layer.d.ts.map +1 -0
- package/dist/firestore.event-stream.storage-layer.js +67 -0
- package/dist/firestore.event-stream.storage-layer.mjs +65 -0
- package/dist/firestore.event-stream.store.d.ts +6 -0
- package/dist/firestore.event-stream.store.d.ts.map +1 -0
- package/dist/firestore.event-stream.store.js +14 -0
- package/dist/firestore.event-stream.store.mjs +13 -0
- package/dist/firestore.projected-stream.reader.d.ts +13 -0
- package/dist/firestore.projected-stream.reader.d.ts.map +1 -0
- package/dist/firestore.projected-stream.reader.js +35 -0
- package/dist/firestore.projected-stream.reader.mjs +34 -0
- package/dist/firestore.projected-stream.reader.spec.d.ts +2 -0
- package/dist/firestore.projected-stream.reader.spec.d.ts.map +1 -0
- package/dist/firestore.projected-stream.storage-layer.d.ts +31 -0
- package/dist/firestore.projected-stream.storage-layer.d.ts.map +1 -0
- package/dist/firestore.projected-stream.storage-layer.js +121 -0
- package/dist/firestore.projected-stream.storage-layer.mjs +118 -0
- package/dist/firestore.snapshotter.d.ts +6 -0
- package/dist/firestore.snapshotter.d.ts.map +1 -0
- package/dist/firestore.snapshotter.js +36 -0
- package/dist/firestore.snapshotter.mjs +35 -0
- package/dist/index.d.ts +11 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +30 -0
- package/dist/index.mjs +12 -0
- package/dist/projection/cases/attempts.spec.d.ts +2 -0
- package/dist/projection/cases/attempts.spec.d.ts.map +1 -0
- package/dist/projection/cases/batchlast.spec.d.ts +2 -0
- package/dist/projection/cases/batchlast.spec.d.ts.map +1 -0
- package/dist/projection/cases/bigshuffle.spec.d.ts +2 -0
- package/dist/projection/cases/bigshuffle.spec.d.ts.map +1 -0
- package/dist/projection/cases/burst.spec.d.ts +2 -0
- package/dist/projection/cases/burst.spec.d.ts.map +1 -0
- package/dist/projection/cases/claimtimeout.spec.d.ts +2 -0
- package/dist/projection/cases/claimtimeout.spec.d.ts.map +1 -0
- package/dist/projection/cases/concurrency.spec.d.ts +2 -0
- package/dist/projection/cases/concurrency.spec.d.ts.map +1 -0
- package/dist/projection/cases/deduplicate.spec.d.ts +2 -0
- package/dist/projection/cases/deduplicate.spec.d.ts.map +1 -0
- package/dist/projection/cases/defer.spec.d.ts +2 -0
- package/dist/projection/cases/defer.spec.d.ts.map +1 -0
- package/dist/projection/cases/lock.spec.d.ts +2 -0
- package/dist/projection/cases/lock.spec.d.ts.map +1 -0
- package/dist/projection/cases/skip.spec.d.ts +2 -0
- package/dist/projection/cases/skip.spec.d.ts.map +1 -0
- package/dist/projection/cases/stress.spec.d.ts +2 -0
- package/dist/projection/cases/stress.spec.d.ts.map +1 -0
- package/dist/projection/event-coordinator.d.ts +16 -0
- package/dist/projection/event-coordinator.d.ts.map +1 -0
- package/dist/projection/event-coordinator.js +47 -0
- package/dist/projection/event-coordinator.mjs +47 -0
- package/dist/projection/firestore.projector.d.ts +128 -0
- package/dist/projection/firestore.projector.d.ts.map +1 -0
- package/dist/projection/firestore.projector.js +538 -0
- package/dist/projection/firestore.projector.mjs +533 -0
- package/dist/projection/testkit/case-fixture.d.ts +610 -0
- package/dist/projection/testkit/case-fixture.d.ts.map +1 -0
- package/dist/projection/testkit.d.ts +44 -0
- package/dist/projection/testkit.d.ts.map +1 -0
- package/dist/projection/trace.decorator.d.ts +2 -0
- package/dist/projection/trace.decorator.d.ts.map +1 -0
- package/dist/utils/promise-with-resolvers.d.ts +7 -0
- package/dist/utils/promise-with-resolvers.d.ts.map +1 -0
- package/dist/utils/promise-with-resolvers.js +17 -0
- package/dist/utils/promise-with-resolvers.mjs +16 -0
- package/package.json +43 -41
|
@@ -0,0 +1,533 @@
|
|
|
1
|
+
import { EventCoordinator } from "./event-coordinator.mjs";
|
|
2
|
+
import { Cursor, EventId, Lock, ProjectedStreamReader } from "@ddd-ts/core";
|
|
3
|
+
import { DefaultConverter } from "@ddd-ts/store-firestore";
|
|
4
|
+
import { FieldValue, Timestamp } from "firebase-admin/firestore";
|
|
5
|
+
import { Mapping, MicrosecondTimestamp, Optional, Shape } from "@ddd-ts/shape";
|
|
6
|
+
|
|
7
|
+
//#region src/projection/firestore.projector.ts
|
|
8
|
+
// Outcome of a single projector attempt; note SUCCESS intentionally serializes as "OK".
const Status = {
  SUCCESS: "OK",
  FAILURE: "FAILURE",
  DEFERRED: "DEFERRED"
};
// Lifecycle state of a queued task, as reported by FirestoreQueueStore.isProcessed().
const TaskState = {
  ENQUEUED: "ENQUEUED",
  PROCESSED: "PROCESSED",
  MISSING: "MISSING"
};
// Promise-based sleep helper used by the retry/backoff generator.
const wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
// Retention window: tasks absent from the queue but older than this are
// assumed already processed and cleaned up (see isProcessed).
const RETENTION = MicrosecondTimestamp.MONTH;
|
|
20
|
+
/**
 * Drives a projection from saved event changes: per-checkpoint event
 * coordination, task enqueueing, claiming, and processing with a
 * retry/backoff loop until the target event has been handled.
 *
 * NOTE(review): the contracts of `projection`, `reader` and `queue` are
 * defined elsewhere; comments below describe only what this file shows.
 */
var FirestoreProjector = class {
  // When true, a failed processing attempt releases the claimed tasks back
  // to the queue (see processEvents). Appears to be a test/debug toggle.
  _unclaim = true;
  /**
   * @param projection - supplies getCheckpointId / getSource / getTaskSettings / process
   * @param reader - event stream access (getCursor / slice / get)
   * @param queue - task queue store (head / enqueue / claim / claimed / unclaim / cleanup / ...)
   * @param config - retry & backoff tuning, enqueue batch size, error callbacks
   */
  constructor(projection, reader, queue, config = {
    retry: {
      attempts: 10,
      minDelay: 10,
      maxDelay: 200,
      backoff: 1.5
    },
    enqueue: { batchSize: 100 },
    onProcessError: (error) => {
      console.error("Error processing event:", error);
    },
    onEnqueueError: (error) => {
      console.error("Error enqueuing tasks:", error);
    }
  }) {
    this.projection = projection;
    this.reader = reader;
    this.queue = queue;
    this.config = config;
  }
  /**
   * Async generator yielding `[attemptIndex, reset]` up to `attempts` times.
   * Calling `reset()` decrements the loop index so the current attempt does
   * not count against the budget (used for DEFERRED outcomes).
   * Between yields it sleeps `(backoff * i + 1) * minDelay` plus a random
   * jitter in [0, maxDelay - minDelay) — delay grows with the attempt index.
   */
  async *breathe() {
    const { attempts, minDelay, maxDelay, backoff } = this.config.retry;
    for (let i = 0; i < attempts; i++) {
      const reset = () => {
        i--;
      };
      yield [i, reset];
      const margin = maxDelay - minDelay;
      const jitter = Math.random() * margin;
      await wait((backoff * i + 1) * minDelay + jitter);
    }
  }
  // One EventCoordinator per serialized checkpoint id; entries self-remove
  // when the coordinator reports empty.
  eventCoordinators = /* @__PURE__ */ new Map();
  /**
   * Lazily creates (and caches) the coordinator for a checkpoint.
   * The onEmpty hook deletes the cache entry so idle checkpoints don't leak.
   */
  getEventCoordinator(checkpointId) {
    const key = checkpointId.serialize();
    let coordinator = this.eventCoordinators.get(key);
    if (!coordinator) {
      coordinator = new EventCoordinator();
      coordinator.onEmpty(() => this.eventCoordinators.delete(key));
      this.eventCoordinators.set(key, coordinator);
    }
    return coordinator;
  }
  /**
   * Entry point: handle one saved change.
   * 1. Register with the checkpoint's coordinator and wait for our turn;
   *    bail out early if the coordinator says we need not proceed.
   * 2. Resolve the target cursor for the change (throws if missing).
   * 3. Retry attempt() under breathe(); DEFERRED resets the attempt budget,
   *    SUCCESS cleans up the queue, anything else accumulates errors.
   * Throws with all accumulated messages when the retry budget is exhausted.
   */
  async handle(savedChange) {
    const checkpointId = this.projection.getCheckpointId(savedChange);
    const eventCoordinator = this.getEventCoordinator(checkpointId);
    eventCoordinator.addEvent(savedChange);
    await eventCoordinator.waitCurrentEvent();
    if (!eventCoordinator.canProceed(savedChange)) {
      eventCoordinator.cleanEvent(savedChange);
      return;
    }
    const disposeEventCoordinator = eventCoordinator.start(savedChange);
    const target = await this.getCursor(savedChange);
    if (!target) {
      disposeEventCoordinator();
      throw new Error(`Cursor not found for event ${savedChange.id.serialize()}`);
    }
    const errors = [];
    for await (const [attempt, reset] of this.breathe()) {
      const source = this.projection.getSource(savedChange);
      const [status, message] = await this.attempt(source, checkpointId, target);
      if (status === Status.DEFERRED) {
        reset();
        continue;
      }
      if (status === Status.SUCCESS) {
        await this.queue.cleanup(checkpointId);
        disposeEventCoordinator();
        return;
      }
      errors.push(message);
    }
    disposeEventCoordinator();
    throw new Error(`Failed to handle event ${savedChange.id.serialize()}: ${errors.join(", ")}`);
  }
  // Thin wrapper over the reader; kept as a seam (e.g. for tests/overrides).
  async getCursor(savedChange) {
    return await this.reader.getCursor(savedChange);
  }
  /**
   * One attempt at getting `target` processed for this checkpoint.
   * Returns a `[Status, message]` tuple:
   * - if the target is past the queue head, enqueue the missing slice;
   * - otherwise check whether the target was already processed, and
   *   re-enqueue it individually if the task document is missing;
   * - then claim a batch of unprocessed tasks and process them.
   * Claim contention surfaces as DEFERRED so handle() retries for free.
   */
  async attempt(source, checkpointId, target) {
    const headCursor = await this.getQueueHead(checkpointId);
    const isTargetAfterHead = !headCursor || target.isAfter(headCursor);
    if (isTargetAfterHead) {
      const [status, message] = await this.enqueue(source, headCursor, checkpointId, target);
      if (status === Status.DEFERRED) return [Status.DEFERRED, message];
    }
    if (!isTargetAfterHead) {
      const processed = await this.checkIsProcessed(checkpointId, target);
      if (processed === TaskState.PROCESSED) return [Status.SUCCESS, "Target event already processed"];
      if (processed === TaskState.MISSING) {
        const [status, message] = await this.enqueueOne(checkpointId, target);
        // NOTE(review): a DEFERRED enqueue is downgraded to FAILURE here,
        // unlike the branch above — confirm this asymmetry is intended.
        if (status === Status.DEFERRED) return [Status.FAILURE, message];
      }
    }
    const unprocessed = await this.getUnprocessed(checkpointId);
    if (!unprocessed.length) return [Status.FAILURE, "No unprocessed tasks found"];
    const batch = Task.batch(unprocessed);
    if (!batch.length) return [Status.DEFERRED, "No tasks available to claim, deferring"];
    const claimer = ClaimerId.generate();
    const [status, message] = await this.claimTasks(checkpointId, claimer, batch);
    if (status === Status.FAILURE) return [Status.DEFERRED, message];
    return await this.processEvents(checkpointId, claimer, target.eventId);
  }
  // Latest cursor currently present in the checkpoint's queue (or undefined).
  async getQueueHead(checkpointId) {
    return await this.queue.head(checkpointId);
  }
  /**
   * Reads the slice of the source stream between the queue head and the
   * target, for this checkpoint's shard, limited by enqueue.batchSize.
   */
  async readSourceStream(source, head, checkpointId, target) {
    const shard = checkpointId.shard();
    const headCursor = head;
    const limit = this.config.enqueue.batchSize;
    return this.reader.slice(source, shard, headCursor, target, limit);
  }
  /**
   * Converts the missing stream slice into Task documents (settings come
   * from the projection) and enqueues them; returns the queue's
   * `[Status, message]` result.
   */
  async enqueue(source, head, checkpointId, target) {
    const tasks = (await this.readSourceStream(source, head, checkpointId, target)).map((e) => {
      const settings = this.projection.getTaskSettings(e);
      return Task.new(e, settings);
    });
    return await this.queue.enqueue(checkpointId, tasks);
  }
  /**
   * Enqueues a single task for `target` (used when its queue document is
   * missing even though the target is not past the head).
   * @throws when the reader cannot resolve the event behind the cursor.
   */
  async enqueueOne(checkpointId, target) {
    const event = await this.reader.get(target);
    if (!event) throw new Error(`Event not found for cursor ${target.ref}`);
    const settings = this.projection.getTaskSettings(event);
    const task = Task.new(event, settings);
    return await this.queue.enqueue(checkpointId, [task]);
  }
  // Queue lookup: PROCESSED / ENQUEUED / MISSING (see TaskState).
  async checkIsProcessed(checkpointId, cursor) {
    return await this.queue.isProcessed(checkpointId, cursor);
  }
  // All not-yet-processed tasks for the checkpoint (queue applies its own limit).
  async getUnprocessed(checkpointId) {
    return await this.queue.unprocessed(checkpointId);
  }
  /**
   * Attempts to claim `batch` for `claimer`; claim conflicts (the queue
   * throws on contention) are converted into a FAILURE tuple rather than
   * propagated, so attempt() can defer.
   */
  async claimTasks(checkpointId, claimer, batch) {
    try {
      await this.queue.claim(checkpointId, claimer, batch);
      return [Status.SUCCESS, "Tasks claimed successfully"];
    } catch (e) {
      return [Status.FAILURE, e];
    }
  }
  /**
   * Loads the events behind the claimed tasks and runs them through the
   * projection. SUCCESS only when the projection reports the target event
   * id among the processed ids; otherwise DEFERRED so the caller retries.
   * On error: reports via onProcessError, optionally unclaims, and returns
   * FAILURE only if the target was actually in this batch.
   */
  async processEvents(checkpointId, claimer, targetEventId) {
    const tasks = await this.queue.claimed(checkpointId, claimer);
    const filtered = (await Promise.all(tasks.map((t) => this.reader.get(t.cursor)))).filter((t) => !!t);
    if (!filtered.length) return [Status.DEFERRED, "No events to process in claimed tasks"];
    const context = {
      // Pre-bound so the projection can mark ids processed under this claimer.
      onProcessed: this.queue.processed.bind(this.queue, claimer),
      checkpointId,
      assertBeforeInsert: this.assertBeforeInsert.bind(this, checkpointId, claimer, filtered)
    };
    const hasTarget = tasks.some((t) => t.id.equals(targetEventId));
    try {
      if ((await this.projection.process(filtered, context)).some((id) => id?.equals(targetEventId))) return [Status.SUCCESS, "Target event processed successfully"];
      return [Status.DEFERRED, "Target event not processed yet"];
    } catch (e) {
      this.config.onProcessError(e);
      if (this._unclaim) await this.queue.unclaim(checkpointId, tasks);
      if (!hasTarget) return [Status.DEFERRED, "Target event not in claimed batch, deferring"];
      return [Status.FAILURE, e];
    }
  }
  /**
   * Safety check handed to the projection: verifies that every event about
   * to be inserted still belongs to a task whose FIRST claim id is ours.
   * @throws when a task is missing or has been re-claimed by someone else.
   */
  async assertBeforeInsert(checkpointId, claimer, events) {
    const claimedTasks = await this.queue.claimed(checkpointId, claimer);
    const claimedTasksMap = new Map(claimedTasks.map((t) => [t.id.serialize(), t]));
    for (const event of events) {
      const task = claimedTasksMap.get(event.id.serialize());
      if (!task) throw new Error(`Task not found for event ${event.id.serialize()} in claimer ${claimer.serialize()}`);
      if (task.claimIds?.[0] !== claimer.serialize()) throw new Error(`Task ${task.id.serialize()} claimer mismatch: expected ${claimer.serialize()}, found ${task.claimIds?.[0]}`);
    }
  }
};
|
|
192
|
+
/**
 * Raised (as a DEFERRED-status payload) when enqueueing tasks whose queue
 * documents already exist (Firestore rejects the batch with code 6).
 */
var AlreadyEnqueuedError = class AlreadyEnqueuedError extends Error {
  constructor() {
    super("Tasks already enqueued");
    this.name = "AlreadyEnqueuedError";
  }
};
|
|
198
|
+
/**
 * Firestore-backed task queue. Documents live under
 * `checkpoints/{checkpoint.name}/shards/{shard}/queue/{eventId}` (see the
 * checkpoint/queue/queued helpers). Optimistic concurrency is enforced via
 * Firestore `lastUpdateTime` preconditions carried on each Task.
 */
var FirestoreQueueStore = class {
  converter = new DefaultConverter();
  collection;
  constructor(db) {
    this.db = db;
    this.collection = db.collection("checkpoints");
  }
  // Firestore Timestamp -> MicrosecondTimestamp. Integer BigInt division
  // truncates sub-microsecond nanoseconds.
  timestampToMicroseconds(timestamp) {
    return new MicrosecondTimestamp(BigInt(timestamp.seconds) * BigInt(1e6) + BigInt(timestamp.nanoseconds) / BigInt(1e3));
  }
  // MicrosecondTimestamp -> Firestore Timestamp (exact; µs map onto ns).
  microsecondsToTimestamp(microseconds) {
    const seconds = BigInt(microseconds.micros) / 1000000n;
    const nanoseconds = BigInt(microseconds.micros) % 1000000n * 1000n;
    return new Timestamp(Number(seconds), Number(nanoseconds));
  }
  /**
   * Creates one queue document per task in a single batch. `create` makes
   * the whole batch fail if ANY document already exists: Firestore reports
   * that as gRPC code 6 (ALREADY_EXISTS), mapped to DEFERRED with an
   * AlreadyEnqueuedError payload. Any other commit error is also DEFERRED.
   */
  async enqueue(checkpointId, tasks) {
    const batch = this.collection.firestore.batch();
    for (const task of tasks) {
      const ref = this.queued(checkpointId, task.id);
      batch.create(ref, {
        // Store a live document reference to the source event alongside
        // the serialized task fields.
        ref: this.db.doc(task.cursor.ref),
        ...this.converter.toFirestore(task.serialize())
      });
    }
    try {
      await batch.commit();
      return [Status.SUCCESS, "Tasks enqueued successfully"];
    } catch (err) {
      if (err.code === 6) return [Status.DEFERRED, new AlreadyEnqueuedError()];
      return [Status.DEFERRED, err];
    }
  }
  /**
   * Claims every task for `claimer` in one batch. Each update carries a
   * `lastUpdateTime` precondition, so the whole commit fails if any task
   * changed since it was read — that contention is the caller's signal to
   * defer. Throws up-front if a task already has claim ids.
   */
  async claim(checkpointId, claimer, tasks) {
    const batch = this.collection.firestore.batch();
    for (const task of tasks) {
      if (task.claimIds.length > 0) throw new Error(`Task ${task.id.serialize()} is already claimed by ${task.claimIds.join(", ")}`);
      const ref = this.queued(checkpointId, task.id);
      batch.update(ref, {
        claimer: claimer.serialize(),
        claimedAt: FieldValue.serverTimestamp(),
        [`claimsMetadata.${claimer.serialize()}`]: { claimedAt: FieldValue.serverTimestamp() },
        claimIds: FieldValue.arrayUnion(claimer.serialize()),
        attempts: FieldValue.increment(1),
        remaining: FieldValue.increment(-1)
      }, { lastUpdateTime: this.microsecondsToTimestamp(task.lastUpdateTime) });
    }
    await batch.commit();
  }
  /**
   * Newest queued document (by occurredAt, then revision) as a Cursor, or
   * undefined for an empty queue. The trailing `headDoc ? ... : void 0`
   * is redundant after the earlier guard but harmless.
   */
  async head(checkpointId) {
    const headDoc = (await this.queue(checkpointId).orderBy("occurredAt", "desc").orderBy("revision", "desc").limit(1).get()).docs[0];
    if (!headDoc) return;
    const headData = this.converter.fromFirestoreSnapshot(headDoc);
    if (!headData) return;
    return headDoc ? Cursor.deserialize({
      ref: headData.ref,
      occurredAt: headData.occurredAt,
      revision: headData.revision,
      eventId: headData.id
    }) : void 0;
  }
  /**
   * Up to 100 unprocessed tasks with remaining attempts, oldest first.
   * As a side effect, claims that exceeded their timeout (detected by
   * Task.checkTimeout mutating claimIds) are released in Firestore via a
   * lastUpdateTime-guarded batch before the tasks are returned.
   */
  async unprocessed(checkpointId) {
    const tasks = (await this.queue(checkpointId).where("processed", "==", false).where("remaining", ">", 0).orderBy("occurredAt", "asc").orderBy("revision", "asc").limit(100).get()).docs.map((doc) => {
      const data = this.converter.fromFirestoreSnapshot(doc);
      const timestamp = doc.updateTime ? this.timestampToMicroseconds(doc.updateTime) : void 0;
      return Task.deserializeWithLastUpdateTime(data, timestamp);
    });
    const expiredTasks = [];
    for (const task of tasks) {
      const originalClaimIds = task.claimIds;
      task.checkTimeout();
      // checkTimeout replaces claimIds with a filtered copy, so a shorter
      // array means a claim expired.
      if (originalClaimIds.length > task.claimIds.length) expiredTasks.push(task);
    }
    if (expiredTasks.length > 0) {
      const batch = this.collection.firestore.batch();
      for (const task of expiredTasks) {
        const ref = this.queued(checkpointId, task.id);
        batch.update(ref, {
          claimer: FieldValue.delete(),
          claimedAt: FieldValue.delete(),
          claimIds: task.claimIds
        }, { lastUpdateTime: this.microsecondsToTimestamp(task.lastUpdateTime) });
      }
      await batch.commit();
    }
    return tasks;
  }
  /**
   * Tasks claimed by `claimer`, oldest first, keeping only those whose
   * FIRST claim id is this claimer (i.e. it holds the primary claim).
   */
  async claimed(checkpointId, claimer) {
    return (await this.queue(checkpointId).where("claimIds", "array-contains", claimer.serialize()).orderBy("occurredAt", "asc").orderBy("revision", "asc").get()).docs.map((doc) => {
      const data = this.converter.fromFirestoreSnapshot(doc);
      const timestamp = doc.updateTime ? this.timestampToMicroseconds(doc.updateTime) : void 0;
      return Task.deserializeWithLastUpdateTime(data, timestamp);
    }).filter((task) => task.claimIds[0] === claimer.serialize());
  }
  /**
   * Releases claims on the given tasks (removes each task's most recent
   * claim id), guarded by lastUpdateTime preconditions.
   */
  async unclaim(checkpointId, tasks) {
    const batch = this.collection.firestore.batch();
    for (const task of tasks) {
      const ref = this.queued(checkpointId, task.id);
      batch.update(ref, {
        claimer: FieldValue.delete(),
        claimedAt: FieldValue.delete(),
        claimIds: FieldValue.arrayRemove(task.currentClaimId)
      }, { lastUpdateTime: this.microsecondsToTimestamp(task.lastUpdateTime) });
    }
    await batch.commit();
  }
  /**
   * If the task exists, then looks for a processed flag.
   * If not found, check if the cursor is older than the retention time for processed event.
   * If so, consider it processed.
   * Otherwise, consider it missing.
   */
  async isProcessed(checkpointId, cursor) {
    const doc = await this.queued(checkpointId, cursor.eventId).get();
    if (doc.exists) {
      const data = doc.data();
      if (!data) throw new Error("No data in queued document");
      if (data.processed === true) return TaskState.PROCESSED;
      return TaskState.ENQUEUED;
    }
    const lastRetention = MicrosecondTimestamp.now().sub(RETENTION);
    if (cursor.isOlderThan(lastRetention)) return TaskState.PROCESSED;
    return TaskState.MISSING;
  }
  // checkpoints/{name}/shards
  checkpoint(id) {
    return this.collection.doc(id.name).collection("shards");
  }
  // checkpoints/{name}/shards/{shard}/queue
  queue(id) {
    return this.checkpoint(id).doc(id.shard()).collection("queue");
  }
  // checkpoints/{name}/shards/{shard}/queue/{eventId}
  queued(id, eventId) {
    return this.queue(id).doc(eventId.serialize());
  }
  /**
   * Marks the given event ids processed for `claimerId`, either inside the
   * caller's transaction or via parallel direct updates.
   * NOTE(review): `batchWriter` is destructured from context but never
   * used here — confirm whether a bulkWriter path was intended.
   */
  async processed(claimerId, id, eventIds, context = {}) {
    const { transaction: trx, batchWriter } = context;
    if (trx) {
      for (const eventId of eventIds) {
        const ref = this.queued(id, eventId);
        trx.transaction.update(ref, {
          processed: true,
          [`claimsMetadata.${claimerId.serialize()}.processedAt`]: FieldValue.serverTimestamp()
        });
      }
      return;
    }
    await Promise.all(eventIds.map((eventId) => this.queued(id, eventId).update({
      processed: true,
      [`claimsMetadata.${claimerId.serialize()}.processedAt`]: FieldValue.serverTimestamp()
    })));
  }
  /**
   * Oldest task that still has remaining attempts, as a Cursor, or
   * undefined. Mirrors head() at the other end of the queue.
   */
  async getTailCursor(id) {
    const tailDoc = (await this.queue(id).where("remaining", ">", 0).orderBy("occurredAt", "asc").orderBy("revision", "asc").limit(1).get()).docs[0];
    if (!tailDoc) return;
    const tailData = this.converter.fromFirestoreSnapshot(tailDoc);
    if (!tailData) return;
    return tailDoc ? Cursor.deserialize({
      ref: tailData.ref,
      occurredAt: tailData.occurredAt,
      revision: tailData.revision,
      eventId: tailData.id
    }) : void 0;
  }
  /**
   * Deletes processed tasks older than ~a month, stopping at the first
   * unprocessed document and always keeping TRAIL trailing documents so a
   * tail cursor remains available.
   */
  async cleanup(id) {
    const aMonthAgo = MicrosecondTimestamp.now().sub(MicrosecondTimestamp.WEEK.mult(4));
    const query = this.queue(id).where("remaining", ">", 0).where("occurredAt", "<", aMonthAgo.serialize()).orderBy("occurredAt", "asc").orderBy("revision", "asc");
    const TRAIL = 1;
    const snapshot = await query.get();
    if (snapshot.size < TRAIL) return;
    // findIndex returns -1 when everything is processed; slice(0, -1) then
    // drops the last doc from the cleanable set, which the TRAIL margin
    // also protects — net effect: the newest docs are always preserved.
    const stopper = snapshot.docs.findIndex((doc) => !doc.data().processed);
    const cleanable = snapshot.docs.slice(0, stopper);
    const cleaning = cleanable.slice(0, cleanable.length - TRAIL);
    if (cleaning.length === 0) return;
    const batch = this.collection.firestore.batch();
    for (const queued of cleaning) batch.delete(queued.ref);
    await batch.commit();
  }
  // Deletes every document in the checkpoint's queue (streaming + bulkWriter).
  async flush(id) {
    const stream = this.queue(id).stream();
    const writer = this.collection.firestore.bulkWriter();
    for await (const queued of stream) writer.delete(queued.ref);
    await writer.close();
  }
  /**
   * This method adds a fake processed event to the queue.
   * It is useful for initializing the tail cursor of a new projection, at the
   * same time as the projection's initial state is created, reset, or updated.
   * By default, it will use the current time as the occurredAt timestamp.
   * You can override this by providing a specific timestamp.
   *
   * This ensures that the projection can start processing new events from the
   * correct point in time, avoiding reprocessing of old events.
   *
   * NOTE(review): the catch below only swallows AlreadyEnqueuedError, but
   * `create()` on an existing document rejects with a Firestore error
   * (code 6), not AlreadyEnqueuedError — so a duplicate seed appears to
   * rethrow. Confirm whether the intent was to check `e.code === 6`.
   */
  async seed(checkpointId) {
    const cursor = new Cursor({
      ref: "seed",
      occurredAt: MicrosecondTimestamp.now(),
      revision: 0,
      eventId: EventId.generate()
    });
    const task = new Task({
      id: EventId.generate(),
      ref: "Seed",
      occurredAt: cursor.occurredAt,
      revision: cursor.revision,
      attempts: 0,
      processed: true,
      claimer: void 0,
      claimedAt: void 0,
      claimsMetadata: {},
      claimIds: [],
      lock: new Lock({}),
      remaining: 1,
      claimTimeout: 0,
      skipAfter: 0,
      isolateAfter: 0,
      lastUpdateTime: void 0
    });
    try {
      const serialized = task.serialize();
      const converted = this.converter.toFirestore(serialized);
      await this.queued(checkpointId, task.cursor.eventId).create(converted);
    } catch (e) {
      if (!(e instanceof AlreadyEnqueuedError)) throw e;
    }
  }
};
|
|
423
|
+
// Identity of a single claim attempt; reuses EventId's generate()/serialize().
var ClaimerId = class extends EventId {};
|
|
424
|
+
/**
 * A unit of work in the queue: one event to be projected, plus claim
 * bookkeeping (who claimed it, when, how many attempts remain) and the
 * lock/skip/isolate settings that drive batching. Built on the project's
 * Shape() schema mixin, which supplies serialize()/deserialize().
 */
var Task = class Task extends Shape({
  id: EventId,
  ref: String,
  occurredAt: MicrosecondTimestamp,
  revision: Number,
  attempts: Number,
  processed: Boolean,
  claimer: Optional(String),
  claimedAt: Optional(MicrosecondTimestamp),
  claimsMetadata: Mapping([{
    claimedAt: MicrosecondTimestamp,
    processedAt: Optional(MicrosecondTimestamp)
  }]),
  claimIds: [String],
  lock: Lock,
  skipAfter: Number,
  remaining: Number,
  isolateAfter: Number,
  claimTimeout: Number,
  lastUpdateTime: Optional(MicrosecondTimestamp)
}) {
  // Stream position of the underlying event, rebuilt from this task's fields.
  get cursor() {
    return new Cursor({
      ref: this.ref,
      occurredAt: this.occurredAt,
      revision: this.revision,
      eventId: this.id
    });
  }
  /**
   * Builds a fresh, unclaimed task for an event (`fact`) with the
   * projection-supplied settings. Note `remaining` starts at
   * `config.skipAfter`: the attempt budget equals the skip threshold.
   */
  static new(fact, config) {
    return new Task({
      id: fact.id,
      attempts: 0,
      claimer: void 0,
      processed: false,
      claimedAt: void 0,
      claimsMetadata: {},
      claimIds: [],
      lock: config.lock,
      claimTimeout: config.claimTimeout,
      skipAfter: config.skipAfter,
      isolateAfter: config.isolateAfter,
      remaining: config.skipAfter,
      ref: fact.ref,
      revision: fact.revision,
      occurredAt: fact.occurredAt,
      lastUpdateTime: void 0
    });
  }
  // Most recent claim id, or undefined when unclaimed.
  get currentClaimId() {
    return this.claimIds.at(-1);
  }
  // A task with any live claim id counts as in-flight.
  get isProcessing() {
    return this.currentClaimId !== void 0;
  }
  get isProcessed() {
    return !!this.processed;
  }
  // Exhausted its attempt budget: exclude it from future batches.
  get shouldSkip() {
    return this.attempts > this.skipAfter;
  }
  // Repeatedly failing: must be processed alone (see batch()).
  get shouldIsolate() {
    return this.attempts > this.isolateAfter;
  }
  /**
   * Drops the current claim if it has outlived claimTimeout.
   * claimTimeout appears to be in milliseconds (multiplied by 1000n to
   * compare against microsecond timestamps) — confirm against callers.
   * Mutates `claimIds` in place with a filtered copy; unprocessed() relies
   * on the resulting length change to detect expiry.
   */
  checkTimeout() {
    const claimer = this.currentClaimId;
    if (!claimer) return;
    const claimInfo = this.claimsMetadata[claimer];
    if (!claimInfo || !claimInfo.claimedAt) return;
    if (MicrosecondTimestamp.now().micros - claimInfo.claimedAt.micros > BigInt(this.claimTimeout) * 1000n) this.claimIds = this.claimIds.filter((id) => id !== claimer);
  }
  // Deserialize a queue document, attaching Firestore's updateTime so later
  // writes can use it as an optimistic-concurrency precondition.
  static deserializeWithLastUpdateTime(data, timestamp) {
    return Task.deserialize({
      ...data,
      lastUpdateTime: timestamp
    });
  }
  /**
   * Selects the next claimable batch from candidate tasks (assumed ordered):
   * - skipped tasks are dropped entirely;
   * - an isolating task ends the batch (alone if nothing was selected yet);
   * - tasks restrained by an earlier task's lock, or by a lock already in
   *   the batch, are deferred (Lock.restrains semantics live in @ddd-ts/core);
   * - already-processed tasks are dropped; in-flight tasks only contribute
   *   their lock.
   * Deferred/in-flight locks accumulate so later conflicting tasks are
   * also held back, preserving ordering guarantees.
   */
  static batch(tasks) {
    const locks = [];
    const batchLocks = [];
    const batch = [];
    for (const task of tasks) {
      if (task.shouldSkip) continue;
      if (task.shouldIsolate) {
        if (batch.length > 0) return batch;
        batch.push(task);
        return batch;
      }
      if (locks.some((l) => l.restrains(task.lock))) {
        locks.push(task.lock);
        continue;
      }
      if (batchLocks.some((l) => l.restrains(task.lock, false))) {
        locks.push(task.lock);
        continue;
      }
      if (task.isProcessed) continue;
      if (task.isProcessing) {
        locks.push(task.lock);
        continue;
      }
      batch.push(task);
      batchLocks.push(task.lock);
    }
    return batch;
  }
};
|
|
531
|
+
|
|
532
|
+
//#endregion
|
|
533
|
+
export { AlreadyEnqueuedError, ClaimerId, FirestoreProjector, FirestoreQueueStore, Task };
|