@ddd-ts/event-sourcing-firestore 0.0.36 → 0.0.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. package/LICENSE +21 -0
  2. package/dist/_virtual/_rolldown/runtime.js +29 -0
  3. package/dist/firestore.event-lake.aggregate-store.d.ts +4 -8
  4. package/dist/firestore.event-lake.aggregate-store.d.ts.map +1 -1
  5. package/dist/firestore.event-lake.aggregate-store.js +35 -33
  6. package/dist/firestore.event-lake.aggregate-store.mjs +36 -0
  7. package/dist/firestore.event-lake.storage-layer.d.ts +2 -2
  8. package/dist/firestore.event-lake.storage-layer.d.ts.map +1 -1
  9. package/dist/firestore.event-lake.storage-layer.js +67 -117
  10. package/dist/firestore.event-lake.storage-layer.mjs +65 -0
  11. package/dist/firestore.event-lake.store.d.ts +1 -1
  12. package/dist/firestore.event-lake.store.d.ts.map +1 -1
  13. package/dist/firestore.event-lake.store.js +14 -12
  14. package/dist/firestore.event-lake.store.mjs +13 -0
  15. package/dist/firestore.event-stream.aggregate-store.d.ts +2 -2
  16. package/dist/firestore.event-stream.aggregate-store.d.ts.map +1 -1
  17. package/dist/firestore.event-stream.aggregate-store.js +35 -37
  18. package/dist/firestore.event-stream.aggregate-store.mjs +36 -0
  19. package/dist/firestore.event-stream.storage-layer.d.ts +2 -2
  20. package/dist/firestore.event-stream.storage-layer.d.ts.map +1 -1
  21. package/dist/firestore.event-stream.storage-layer.js +67 -110
  22. package/dist/firestore.event-stream.storage-layer.mjs +65 -0
  23. package/dist/firestore.event-stream.store.d.ts +1 -1
  24. package/dist/firestore.event-stream.store.d.ts.map +1 -1
  25. package/dist/firestore.event-stream.store.js +14 -12
  26. package/dist/firestore.event-stream.store.mjs +13 -0
  27. package/dist/firestore.projected-stream.reader.d.ts +1 -1
  28. package/dist/firestore.projected-stream.reader.d.ts.map +1 -1
  29. package/dist/firestore.projected-stream.reader.js +35 -37
  30. package/dist/firestore.projected-stream.reader.mjs +34 -0
  31. package/dist/firestore.projected-stream.storage-layer.d.ts +1 -1
  32. package/dist/firestore.projected-stream.storage-layer.d.ts.map +1 -1
  33. package/dist/firestore.projected-stream.storage-layer.js +119 -140
  34. package/dist/firestore.projected-stream.storage-layer.mjs +118 -0
  35. package/dist/firestore.snapshotter.js +36 -40
  36. package/dist/firestore.snapshotter.mjs +35 -0
  37. package/dist/index.js +30 -32
  38. package/dist/index.mjs +12 -0
  39. package/dist/projection/firestore.projector.d.ts +1 -1
  40. package/dist/projection/firestore.projector.d.ts.map +1 -1
  41. package/dist/projection/firestore.projector.js +477 -630
  42. package/dist/projection/firestore.projector.mjs +479 -0
  43. package/dist/projection/testkit/case-fixture.d.ts.map +1 -1
  44. package/dist/projection/testkit.d.ts +1 -1
  45. package/dist/projection/testkit.d.ts.map +1 -1
  46. package/package.json +43 -32
  47. package/dist/firestore.event-lake.aggregate-store.js.map +0 -1
  48. package/dist/firestore.event-lake.aggregate-store.spec.js +0 -58
  49. package/dist/firestore.event-lake.aggregate-store.spec.js.map +0 -1
  50. package/dist/firestore.event-lake.storage-layer.js.map +0 -1
  51. package/dist/firestore.event-lake.store.js.map +0 -1
  52. package/dist/firestore.event-lake.store.spec.js +0 -50
  53. package/dist/firestore.event-lake.store.spec.js.map +0 -1
  54. package/dist/firestore.event-stream-store.spec.js +0 -50
  55. package/dist/firestore.event-stream-store.spec.js.map +0 -1
  56. package/dist/firestore.event-stream.aggregate-store.js.map +0 -1
  57. package/dist/firestore.event-stream.aggregate-store.spec.js +0 -54
  58. package/dist/firestore.event-stream.aggregate-store.spec.js.map +0 -1
  59. package/dist/firestore.event-stream.storage-layer.js.map +0 -1
  60. package/dist/firestore.event-stream.store.js.map +0 -1
  61. package/dist/firestore.projected-stream.reader.js.map +0 -1
  62. package/dist/firestore.projected-stream.reader.spec.js +0 -54
  63. package/dist/firestore.projected-stream.reader.spec.js.map +0 -1
  64. package/dist/firestore.projected-stream.storage-layer.js.map +0 -1
  65. package/dist/firestore.snapshotter.js.map +0 -1
  66. package/dist/index.js.map +0 -1
  67. package/dist/projection/cases/attempts.spec.js +0 -42
  68. package/dist/projection/cases/attempts.spec.js.map +0 -1
  69. package/dist/projection/cases/batchlast.spec.js +0 -51
  70. package/dist/projection/cases/batchlast.spec.js.map +0 -1
  71. package/dist/projection/cases/bigshuffle.spec.js +0 -59
  72. package/dist/projection/cases/bigshuffle.spec.js.map +0 -1
  73. package/dist/projection/cases/burst.spec.js +0 -38
  74. package/dist/projection/cases/burst.spec.js.map +0 -1
  75. package/dist/projection/cases/claimtimeout.spec.js +0 -40
  76. package/dist/projection/cases/claimtimeout.spec.js.map +0 -1
  77. package/dist/projection/cases/concurrency.spec.js +0 -49
  78. package/dist/projection/cases/concurrency.spec.js.map +0 -1
  79. package/dist/projection/cases/deduplicate.spec.js +0 -22
  80. package/dist/projection/cases/deduplicate.spec.js.map +0 -1
  81. package/dist/projection/cases/defer.spec.js +0 -44
  82. package/dist/projection/cases/defer.spec.js.map +0 -1
  83. package/dist/projection/cases/lock.spec.js +0 -91
  84. package/dist/projection/cases/lock.spec.js.map +0 -1
  85. package/dist/projection/cases/skip.spec.js +0 -86
  86. package/dist/projection/cases/skip.spec.js.map +0 -1
  87. package/dist/projection/cases/stress.spec.js +0 -73
  88. package/dist/projection/cases/stress.spec.js.map +0 -1
  89. package/dist/projection/firestore.projector.js.map +0 -1
  90. package/dist/projection/testkit/case-fixture.js +0 -341
  91. package/dist/projection/testkit/case-fixture.js.map +0 -1
  92. package/dist/projection/testkit.js +0 -77
  93. package/dist/projection/testkit.js.map +0 -1
  94. package/dist/projection/trace.decorator.js +0 -35
  95. package/dist/projection/trace.decorator.js.map +0 -1
@@ -1,637 +1,484 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.Task = exports.ClaimerId = exports.FirestoreQueueStore = exports.AlreadyEnqueuedError = exports.FirestoreProjector = void 0;
4
- const firestore_1 = require("firebase-admin/firestore");
5
- const core_1 = require("@ddd-ts/core");
6
- const shape_1 = require("@ddd-ts/shape");
7
- const store_firestore_1 = require("@ddd-ts/store-firestore");
1
+ const require_runtime = require('../_virtual/_rolldown/runtime.js');
2
+ let _ddd_ts_core = require("@ddd-ts/core");
3
+ let _ddd_ts_store_firestore = require("@ddd-ts/store-firestore");
4
+ let firebase_admin_firestore = require("firebase-admin/firestore");
5
+ let _ddd_ts_shape = require("@ddd-ts/shape");
6
+
7
+ //#region src/projection/firestore.projector.ts
8
8
  const Status = {
9
- SUCCESS: "OK",
10
- FAILURE: "FAILURE",
11
- DEFERRED: "DEFERRED",
9
+ SUCCESS: "OK",
10
+ FAILURE: "FAILURE",
11
+ DEFERRED: "DEFERRED"
12
12
  };
13
13
  const TaskState = {
14
- ENQUEUED: "ENQUEUED",
15
- PROCESSED: "PROCESSED",
16
- MISSING: "MISSING",
14
+ ENQUEUED: "ENQUEUED",
15
+ PROCESSED: "PROCESSED",
16
+ MISSING: "MISSING"
17
17
  };
18
18
  const wait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
19
- const RETENTION = shape_1.MicrosecondTimestamp.MONTH;
20
- class FirestoreProjector {
21
- projection;
22
- reader;
23
- queue;
24
- config;
25
- _unclaim = true;
26
- constructor(projection, reader, queue, config = {
27
- retry: { attempts: 10, minDelay: 10, maxDelay: 200, backoff: 1.5 },
28
- enqueue: { batchSize: 100 },
29
- onProcessError: (error) => {
30
- console.error("Error processing event:", error);
31
- },
32
- onEnqueueError: (error) => {
33
- console.error("Error enqueuing tasks:", error);
34
- },
35
- }) {
36
- this.projection = projection;
37
- this.reader = reader;
38
- this.queue = queue;
39
- this.config = config;
40
- }
41
- async *breathe() {
42
- const { attempts, minDelay, maxDelay, backoff } = this.config.retry;
43
- for (let i = 0; i < attempts; i++) {
44
- const reset = () => {
45
- i--;
46
- };
47
- yield [i, reset];
48
- const margin = maxDelay - minDelay;
49
- const jitter = Math.random() * margin;
50
- const backedoff = (backoff * i + 1) * minDelay;
51
- const jitteredDelay = backedoff + jitter;
52
- await wait(jitteredDelay);
53
- }
54
- }
55
- // @Trace("projector.handle", ($, e) => ({
56
- // eventId: e.id.serialize(),
57
- // eventName: e.name,
58
- // eventRevision: e.revision,
59
- // eventReference: e.ref,
60
- // projectionName: $.projection.constructor.name,
61
- // checkpointId: $.projection.getCheckpointId(e).serialize(),
62
- // }))
63
- async handle(savedChange) {
64
- const checkpointId = this.projection.getCheckpointId(savedChange);
65
- const target = await this.getCursor(savedChange);
66
- if (!target) {
67
- throw new Error(`Cursor not found for event ${savedChange.id.serialize()}`);
68
- }
69
- const errors = [];
70
- for await (const [attempt, reset] of this.breathe()) {
71
- // console.log(`Attempt ${attempt} for event ${savedChange.id.serialize()}`);
72
- const source = this.projection.getSource(savedChange);
73
- const [status, message] = await this.attempt(source, checkpointId, target);
74
- if (status === Status.DEFERRED) {
75
- reset();
76
- continue;
77
- }
78
- if (status === Status.SUCCESS) {
79
- await this.queue.cleanup(checkpointId);
80
- return;
81
- }
82
- errors.push(message);
83
- }
84
- throw new Error(`Failed to handle event ${savedChange.id.serialize()}: ${errors.join(", ")}`);
85
- }
86
- // @Trace("projector.reader.getCursor")
87
- async getCursor(savedChange) {
88
- return await this.reader.getCursor(savedChange);
89
- }
90
- // @Trace("projector.attempt")
91
- async attempt(source, checkpointId, target) {
92
- const headCursor = await this.getQueueHead(checkpointId);
93
- const isTargetAfterHead = !headCursor || target.isAfter(headCursor);
94
- if (isTargetAfterHead) {
95
- const [status, message] = await this.enqueue(source, headCursor, checkpointId, target);
96
- if (status === Status.DEFERRED) {
97
- return [Status.DEFERRED, message];
98
- }
99
- }
100
- if (!isTargetAfterHead) {
101
- const processed = await this.checkIsProcessed(checkpointId, target);
102
- if (processed === TaskState.PROCESSED) {
103
- return [Status.SUCCESS, "Target event already processed"];
104
- }
105
- if (processed === TaskState.MISSING) {
106
- const [status, message] = await this.enqueueOne(checkpointId, target);
107
- if (status === Status.DEFERRED) {
108
- return [Status.FAILURE, message];
109
- }
110
- }
111
- }
112
- const unprocessed = await this.getUnprocessed(checkpointId);
113
- if (!unprocessed.length) {
114
- return [Status.FAILURE, "No unprocessed tasks found"];
115
- }
116
- const batch = Task.batch(unprocessed);
117
- if (!batch.length) {
118
- return [
119
- Status.DEFERRED,
120
- "No tasks available to claim, deferring",
121
- ];
122
- }
123
- const claimer = ClaimerId.generate();
124
- const [status, message] = await this.claimTasks(checkpointId, claimer, batch);
125
- if (status === Status.FAILURE) {
126
- return [Status.DEFERRED, message];
127
- }
128
- return await this.processEvents(checkpointId, claimer, target.eventId);
129
- }
130
- // @Trace("projector.queue.head")
131
- async getQueueHead(checkpointId) {
132
- return await this.queue.head(checkpointId);
133
- }
134
- // @Trace("projector.readSourceStream")
135
- async readSourceStream(source, head, checkpointId, target) {
136
- const shard = checkpointId.shard();
137
- const headCursor = head;
138
- const limit = this.config.enqueue.batchSize;
139
- return this.reader.slice(source, shard, headCursor, target, limit);
140
- }
141
- // @Trace("projector.enqueue")
142
- async enqueue(source, head, checkpointId, target) {
143
- const events = await this.readSourceStream(source, head, checkpointId, target);
144
- const tasks = events.map((e) => {
145
- const settings = this.projection.getTaskSettings(e);
146
- return Task.new(e, settings);
147
- });
148
- return await this.queue.enqueue(checkpointId, tasks);
149
- }
150
- async enqueueOne(checkpointId, target) {
151
- const event = await this.reader.get(target);
152
- if (!event) {
153
- throw new Error(`Event not found for cursor ${target.ref}`);
154
- }
155
- const settings = this.projection.getTaskSettings(event);
156
- const task = Task.new(event, settings);
157
- return await this.queue.enqueue(checkpointId, [task]);
158
- }
159
- // @Trace("projector.queue.isProcessed")
160
- async checkIsProcessed(checkpointId, cursor) {
161
- return await this.queue.isProcessed(checkpointId, cursor);
162
- }
163
- // @Trace("projector.queue.unprocessed")
164
- async getUnprocessed(checkpointId) {
165
- return await this.queue.unprocessed(checkpointId);
166
- }
167
- // @Trace("projector.queue.claim")
168
- async claimTasks(checkpointId, claimer, batch) {
169
- try {
170
- await this.queue.claim(checkpointId, claimer, batch);
171
- return [Status.SUCCESS, "Tasks claimed successfully"];
172
- }
173
- catch (e) {
174
- return [Status.FAILURE, e];
175
- }
176
- }
177
- // @Trace("projector.processEvents")
178
- async processEvents(checkpointId, claimer, targetEventId) {
179
- const tasks = await this.queue.claimed(checkpointId, claimer);
180
- const todo = await Promise.all(tasks.map((t) => this.reader.get(t.cursor)));
181
- const filtered = todo.filter((t) => !!t);
182
- if (!filtered.length) {
183
- // Nothing to process, possibly all tasks were for events that no longer exist
184
- return [
185
- Status.DEFERRED,
186
- "No events to process in claimed tasks",
187
- ];
188
- }
189
- const onProcessed = this.queue.processed.bind(this.queue);
190
- const context = { onProcessed, checkpointId };
191
- const hasTarget = tasks.some((t) => t.id.equals(targetEventId));
192
- try {
193
- const processed = await this.projection.process(filtered, context);
194
- if (processed.some((id) => id?.equals(targetEventId))) {
195
- return [Status.SUCCESS, "Target event processed successfully"];
196
- }
197
- return [Status.DEFERRED, "Target event not processed yet"];
198
- }
199
- catch (e) {
200
- this.config.onProcessError(e);
201
- if (this._unclaim) {
202
- await this.queue.unclaim(checkpointId, tasks);
203
- }
204
- if (!hasTarget) {
205
- return [
206
- Status.DEFERRED,
207
- "Target event not in claimed batch, deferring",
208
- ];
209
- }
210
- return [Status.FAILURE, e];
211
- }
212
- }
213
- }
214
- exports.FirestoreProjector = FirestoreProjector;
215
- class AlreadyEnqueuedError extends Error {
216
- constructor() {
217
- super("Tasks already enqueued");
218
- this.name = "AlreadyEnqueuedError";
219
- }
220
- }
19
+ const RETENTION = _ddd_ts_shape.MicrosecondTimestamp.MONTH;
20
+ var FirestoreProjector = class {
21
+ _unclaim = true;
22
+ constructor(projection, reader, queue, config = {
23
+ retry: {
24
+ attempts: 10,
25
+ minDelay: 10,
26
+ maxDelay: 200,
27
+ backoff: 1.5
28
+ },
29
+ enqueue: { batchSize: 100 },
30
+ onProcessError: (error) => {
31
+ console.error("Error processing event:", error);
32
+ },
33
+ onEnqueueError: (error) => {
34
+ console.error("Error enqueuing tasks:", error);
35
+ }
36
+ }) {
37
+ this.projection = projection;
38
+ this.reader = reader;
39
+ this.queue = queue;
40
+ this.config = config;
41
+ }
42
+ async *breathe() {
43
+ const { attempts, minDelay, maxDelay, backoff } = this.config.retry;
44
+ for (let i = 0; i < attempts; i++) {
45
+ const reset = () => {
46
+ i--;
47
+ };
48
+ yield [i, reset];
49
+ const margin = maxDelay - minDelay;
50
+ const jitter = Math.random() * margin;
51
+ await wait((backoff * i + 1) * minDelay + jitter);
52
+ }
53
+ }
54
+ async handle(savedChange) {
55
+ const checkpointId = this.projection.getCheckpointId(savedChange);
56
+ const target = await this.getCursor(savedChange);
57
+ if (!target) throw new Error(`Cursor not found for event ${savedChange.id.serialize()}`);
58
+ const errors = [];
59
+ for await (const [attempt, reset] of this.breathe()) {
60
+ const source = this.projection.getSource(savedChange);
61
+ const [status, message] = await this.attempt(source, checkpointId, target);
62
+ if (status === Status.DEFERRED) {
63
+ reset();
64
+ continue;
65
+ }
66
+ if (status === Status.SUCCESS) {
67
+ await this.queue.cleanup(checkpointId);
68
+ return;
69
+ }
70
+ errors.push(message);
71
+ }
72
+ throw new Error(`Failed to handle event ${savedChange.id.serialize()}: ${errors.join(", ")}`);
73
+ }
74
+ async getCursor(savedChange) {
75
+ return await this.reader.getCursor(savedChange);
76
+ }
77
+ async attempt(source, checkpointId, target) {
78
+ const headCursor = await this.getQueueHead(checkpointId);
79
+ const isTargetAfterHead = !headCursor || target.isAfter(headCursor);
80
+ if (isTargetAfterHead) {
81
+ const [status, message] = await this.enqueue(source, headCursor, checkpointId, target);
82
+ if (status === Status.DEFERRED) return [Status.DEFERRED, message];
83
+ }
84
+ if (!isTargetAfterHead) {
85
+ const processed = await this.checkIsProcessed(checkpointId, target);
86
+ if (processed === TaskState.PROCESSED) return [Status.SUCCESS, "Target event already processed"];
87
+ if (processed === TaskState.MISSING) {
88
+ const [status, message] = await this.enqueueOne(checkpointId, target);
89
+ if (status === Status.DEFERRED) return [Status.FAILURE, message];
90
+ }
91
+ }
92
+ const unprocessed = await this.getUnprocessed(checkpointId);
93
+ if (!unprocessed.length) return [Status.FAILURE, "No unprocessed tasks found"];
94
+ const batch = Task.batch(unprocessed);
95
+ if (!batch.length) return [Status.DEFERRED, "No tasks available to claim, deferring"];
96
+ const claimer = ClaimerId.generate();
97
+ const [status, message] = await this.claimTasks(checkpointId, claimer, batch);
98
+ if (status === Status.FAILURE) return [Status.DEFERRED, message];
99
+ return await this.processEvents(checkpointId, claimer, target.eventId);
100
+ }
101
+ async getQueueHead(checkpointId) {
102
+ return await this.queue.head(checkpointId);
103
+ }
104
+ async readSourceStream(source, head, checkpointId, target) {
105
+ const shard = checkpointId.shard();
106
+ const headCursor = head;
107
+ const limit = this.config.enqueue.batchSize;
108
+ return this.reader.slice(source, shard, headCursor, target, limit);
109
+ }
110
+ async enqueue(source, head, checkpointId, target) {
111
+ const tasks = (await this.readSourceStream(source, head, checkpointId, target)).map((e) => {
112
+ const settings = this.projection.getTaskSettings(e);
113
+ return Task.new(e, settings);
114
+ });
115
+ return await this.queue.enqueue(checkpointId, tasks);
116
+ }
117
+ async enqueueOne(checkpointId, target) {
118
+ const event = await this.reader.get(target);
119
+ if (!event) throw new Error(`Event not found for cursor ${target.ref}`);
120
+ const settings = this.projection.getTaskSettings(event);
121
+ const task = Task.new(event, settings);
122
+ return await this.queue.enqueue(checkpointId, [task]);
123
+ }
124
+ async checkIsProcessed(checkpointId, cursor) {
125
+ return await this.queue.isProcessed(checkpointId, cursor);
126
+ }
127
+ async getUnprocessed(checkpointId) {
128
+ return await this.queue.unprocessed(checkpointId);
129
+ }
130
+ async claimTasks(checkpointId, claimer, batch) {
131
+ try {
132
+ await this.queue.claim(checkpointId, claimer, batch);
133
+ return [Status.SUCCESS, "Tasks claimed successfully"];
134
+ } catch (e) {
135
+ return [Status.FAILURE, e];
136
+ }
137
+ }
138
+ async processEvents(checkpointId, claimer, targetEventId) {
139
+ const tasks = await this.queue.claimed(checkpointId, claimer);
140
+ const filtered = (await Promise.all(tasks.map((t) => this.reader.get(t.cursor)))).filter((t) => !!t);
141
+ if (!filtered.length) return [Status.DEFERRED, "No events to process in claimed tasks"];
142
+ const context = {
143
+ onProcessed: this.queue.processed.bind(this.queue),
144
+ checkpointId
145
+ };
146
+ const hasTarget = tasks.some((t) => t.id.equals(targetEventId));
147
+ try {
148
+ if ((await this.projection.process(filtered, context)).some((id) => id?.equals(targetEventId))) return [Status.SUCCESS, "Target event processed successfully"];
149
+ return [Status.DEFERRED, "Target event not processed yet"];
150
+ } catch (e) {
151
+ this.config.onProcessError(e);
152
+ if (this._unclaim) await this.queue.unclaim(checkpointId, tasks);
153
+ if (!hasTarget) return [Status.DEFERRED, "Target event not in claimed batch, deferring"];
154
+ return [Status.FAILURE, e];
155
+ }
156
+ }
157
+ };
158
+ var AlreadyEnqueuedError = class extends Error {
159
+ constructor() {
160
+ super("Tasks already enqueued");
161
+ this.name = "AlreadyEnqueuedError";
162
+ }
163
+ };
164
+ var FirestoreQueueStore = class {
165
+ converter = new _ddd_ts_store_firestore.DefaultConverter();
166
+ collection;
167
+ constructor(db) {
168
+ this.db = db;
169
+ this.collection = db.collection("checkpoints");
170
+ }
171
+ timestampToMicroseconds(timestamp) {
172
+ return new _ddd_ts_shape.MicrosecondTimestamp(BigInt(timestamp.seconds) * BigInt(1e6) + BigInt(timestamp.nanoseconds) / BigInt(1e3));
173
+ }
174
+ microsecondsToTimestamp(microseconds) {
175
+ const seconds = BigInt(microseconds.micros) / 1000000n;
176
+ const nanoseconds = BigInt(microseconds.micros) % 1000000n * 1000n;
177
+ return new firebase_admin_firestore.Timestamp(Number(seconds), Number(nanoseconds));
178
+ }
179
+ async enqueue(checkpointId, tasks) {
180
+ const batch = this.collection.firestore.batch();
181
+ for (const task of tasks) {
182
+ const ref = this.queued(checkpointId, task.id);
183
+ batch.create(ref, {
184
+ ref: this.db.doc(task.cursor.ref),
185
+ ...this.converter.toFirestore(task.serialize())
186
+ });
187
+ }
188
+ try {
189
+ await batch.commit();
190
+ return [Status.SUCCESS, "Tasks enqueued successfully"];
191
+ } catch (err) {
192
+ if (err.code === 6) return [Status.DEFERRED, new AlreadyEnqueuedError()];
193
+ return [Status.DEFERRED, err];
194
+ }
195
+ }
196
+ async claim(checkpointId, claimer, tasks) {
197
+ const batch = this.collection.firestore.batch();
198
+ for (const task of tasks) {
199
+ const ref = this.queued(checkpointId, task.id);
200
+ batch.update(ref, {
201
+ claimer: claimer.serialize(),
202
+ claimedAt: firebase_admin_firestore.FieldValue.serverTimestamp(),
203
+ attempts: firebase_admin_firestore.FieldValue.increment(1),
204
+ remaining: firebase_admin_firestore.FieldValue.increment(-1)
205
+ }, { lastUpdateTime: this.microsecondsToTimestamp(task.lastUpdateTime) });
206
+ }
207
+ await batch.commit();
208
+ }
209
+ async head(checkpointId) {
210
+ const headDoc = (await this.queue(checkpointId).orderBy("occurredAt", "desc").orderBy("revision", "desc").limit(1).get()).docs[0];
211
+ if (!headDoc) return;
212
+ const headData = this.converter.fromFirestoreSnapshot(headDoc);
213
+ if (!headData) return;
214
+ return headDoc ? _ddd_ts_core.Cursor.deserialize({
215
+ ref: headData.ref,
216
+ occurredAt: headData.occurredAt,
217
+ revision: headData.revision,
218
+ eventId: headData.id
219
+ }) : void 0;
220
+ }
221
+ async unprocessed(checkpointId) {
222
+ const tasks = (await this.queue(checkpointId).where("processed", "==", false).where("remaining", ">", 0).orderBy("occurredAt", "asc").orderBy("revision", "asc").limit(100).get()).docs.map((doc) => {
223
+ const data = this.converter.fromFirestoreSnapshot(doc);
224
+ const timestamp = doc.updateTime ? this.timestampToMicroseconds(doc.updateTime) : void 0;
225
+ return Task.deserializeWithLastUpdateTime(data, timestamp);
226
+ });
227
+ const expiredTasks = [];
228
+ for (const task of tasks) {
229
+ const originalClaimer = task.claimer;
230
+ task.checkTimeout();
231
+ if (originalClaimer && !task.claimer) expiredTasks.push(task);
232
+ }
233
+ if (expiredTasks.length > 0) {
234
+ const batch = this.collection.firestore.batch();
235
+ for (const task of expiredTasks) {
236
+ const ref = this.queued(checkpointId, task.id);
237
+ batch.update(ref, {
238
+ claimer: firebase_admin_firestore.FieldValue.delete(),
239
+ claimedAt: firebase_admin_firestore.FieldValue.delete(),
240
+ attempts: task.attempts,
241
+ remaining: task.remaining
242
+ });
243
+ }
244
+ await batch.commit();
245
+ }
246
+ return tasks;
247
+ }
248
+ async claimed(checkpointId, claimer) {
249
+ return (await this.queue(checkpointId).where("claimer", "==", claimer.serialize()).orderBy("occurredAt", "asc").orderBy("revision", "asc").get()).docs.map((doc) => {
250
+ const data = this.converter.fromFirestoreSnapshot(doc);
251
+ const timestamp = doc.updateTime ? this.timestampToMicroseconds(doc.updateTime) : void 0;
252
+ return Task.deserializeWithLastUpdateTime(data, timestamp);
253
+ });
254
+ }
255
+ async unclaim(checkpointId, tasks) {
256
+ const batch = this.collection.firestore.batch();
257
+ for (const task of tasks) {
258
+ const ref = this.queued(checkpointId, task.id);
259
+ batch.update(ref, {
260
+ claimer: firebase_admin_firestore.FieldValue.delete(),
261
+ claimedAt: firebase_admin_firestore.FieldValue.delete()
262
+ });
263
+ }
264
+ await batch.commit();
265
+ }
266
+ /**
267
+ * If the task exists, then looks for a processed flag.
268
+ * If not found, check if the cursor is older than the retention time for processed event.
269
+ * If so, consider it processed.
270
+ * Otherwise, consider it missing.
271
+ */
272
+ async isProcessed(checkpointId, cursor) {
273
+ const doc = await this.queued(checkpointId, cursor.eventId).get();
274
+ if (doc.exists) {
275
+ const data = doc.data();
276
+ if (!data) throw new Error("No data in queued document");
277
+ if (data.processed === true) return TaskState.PROCESSED;
278
+ return TaskState.ENQUEUED;
279
+ }
280
+ const lastRetention = _ddd_ts_shape.MicrosecondTimestamp.now().sub(RETENTION);
281
+ if (cursor.isOlderThan(lastRetention)) return TaskState.PROCESSED;
282
+ return TaskState.MISSING;
283
+ }
284
+ checkpoint(id) {
285
+ return this.collection.doc(id.name).collection("shards");
286
+ }
287
+ queue(id) {
288
+ return this.checkpoint(id).doc(id.shard()).collection("queue");
289
+ }
290
+ queued(id, eventId) {
291
+ return this.queue(id).doc(eventId.serialize());
292
+ }
293
+ async processed(id, eventIds, context = {}) {
294
+ const { transaction: trx, batchWriter } = context;
295
+ if (trx) {
296
+ for (const eventId of eventIds) {
297
+ const ref = this.queued(id, eventId);
298
+ trx.transaction.update(ref, { processed: true });
299
+ }
300
+ return;
301
+ }
302
+ await Promise.all(eventIds.map((eventId) => this.queued(id, eventId).update({ processed: true })));
303
+ }
304
+ async getTailCursor(id) {
305
+ const tailDoc = (await this.queue(id).where("remaining", ">", 0).orderBy("occurredAt", "asc").orderBy("revision", "asc").limit(1).get()).docs[0];
306
+ if (!tailDoc) return;
307
+ const tailData = this.converter.fromFirestoreSnapshot(tailDoc);
308
+ if (!tailData) return;
309
+ return tailDoc ? _ddd_ts_core.Cursor.deserialize({
310
+ ref: tailData.ref,
311
+ occurredAt: tailData.occurredAt,
312
+ revision: tailData.revision,
313
+ eventId: tailData.id
314
+ }) : void 0;
315
+ }
316
+ async cleanup(id) {
317
+ const aMonthAgo = _ddd_ts_shape.MicrosecondTimestamp.now().sub(_ddd_ts_shape.MicrosecondTimestamp.WEEK.mult(4));
318
+ const query = this.queue(id).where("remaining", ">", 0).where("occurredAt", "<", aMonthAgo.serialize()).orderBy("occurredAt", "asc").orderBy("revision", "asc");
319
+ const TRAIL = 1;
320
+ const snapshot = await query.get();
321
+ if (snapshot.size < TRAIL) return;
322
+ const stopper = snapshot.docs.findIndex((doc) => !doc.data().processed);
323
+ const cleanable = snapshot.docs.slice(0, stopper);
324
+ const cleaning = cleanable.slice(0, cleanable.length - TRAIL);
325
+ if (cleaning.length === 0) return;
326
+ const batch = this.collection.firestore.batch();
327
+ for (const queued of cleaning) batch.delete(queued.ref);
328
+ await batch.commit();
329
+ }
330
+ async flush(id) {
331
+ const stream = this.queue(id).stream();
332
+ const writer = this.collection.firestore.bulkWriter();
333
+ for await (const queued of stream) writer.delete(queued.ref);
334
+ await writer.close();
335
+ }
336
+ /**
337
+ * This method adds a fake processed event to the queue.
338
+ * It is useful for initializing the tail cursor of a new projection, at the
339
+ * same time as the projection's initial state is created, reset, or updated.
340
+ * By default, it will use the current time as the occurredAt timestamp.
341
+ * You can override this by providing a specific timestamp.
342
+ *
343
+ * This ensures that the projection can start processing new events from the
344
+ * correct point in time, avoiding reprocessing of old events.
345
+ */
346
+ async seed(checkpointId) {
347
+ const cursor = new _ddd_ts_core.Cursor({
348
+ ref: "seed",
349
+ occurredAt: _ddd_ts_shape.MicrosecondTimestamp.now(),
350
+ revision: 0,
351
+ eventId: _ddd_ts_core.EventId.generate()
352
+ });
353
+ const task = new Task({
354
+ id: _ddd_ts_core.EventId.generate(),
355
+ ref: "Seed",
356
+ occurredAt: cursor.occurredAt,
357
+ revision: cursor.revision,
358
+ attempts: 0,
359
+ processed: true,
360
+ claimer: void 0,
361
+ claimedAt: void 0,
362
+ lock: new _ddd_ts_core.Lock({}),
363
+ remaining: 1,
364
+ claimTimeout: 0,
365
+ skipAfter: 0,
366
+ isolateAfter: 0,
367
+ lastUpdateTime: void 0
368
+ });
369
+ try {
370
+ const serialized = task.serialize();
371
+ const converted = this.converter.toFirestore(serialized);
372
+ await this.queued(checkpointId, task.cursor.eventId).create(converted);
373
+ } catch (e) {
374
+ if (!(e instanceof AlreadyEnqueuedError)) throw e;
375
+ }
376
+ }
377
+ };
378
+ var ClaimerId = class extends _ddd_ts_core.EventId {};
379
/**
 * A queued unit of work derived from a stored event fact.
 * Tracks claim/processing state (claimer, claimedAt, processed), retry
 * accounting (attempts, remaining, skipAfter, isolateAfter) and the
 * backing document's update time (lastUpdateTime), which callers use as
 * an optimistic-concurrency precondition.
 */
var Task = class Task extends (0, _ddd_ts_shape.Shape)({
	id: _ddd_ts_core.EventId,
	ref: String,
	occurredAt: _ddd_ts_shape.MicrosecondTimestamp,
	revision: Number,
	attempts: Number,
	processed: Boolean,
	claimer: (0, _ddd_ts_shape.Optional)(String),
	claimedAt: (0, _ddd_ts_shape.Optional)(_ddd_ts_shape.MicrosecondTimestamp),
	lock: _ddd_ts_core.Lock,
	skipAfter: Number,
	remaining: Number,
	isolateAfter: Number,
	claimTimeout: Number,
	lastUpdateTime: (0, _ddd_ts_shape.Optional)(_ddd_ts_shape.MicrosecondTimestamp)
}) {
	/** Position of this task's event in its stream. */
	get cursor() {
		const position = {
			ref: this.ref,
			occurredAt: this.occurredAt,
			revision: this.revision,
			eventId: this.id
		};
		return new _ddd_ts_core.Cursor(position);
	}
	/** Builds a fresh, unclaimed task for `fact` using the queue `config`. */
	static new(fact, config) {
		const initial = {
			id: fact.id,
			ref: fact.ref,
			revision: fact.revision,
			occurredAt: fact.occurredAt,
			attempts: 0,
			processed: false,
			claimer: void 0,
			claimedAt: void 0,
			lock: config.lock,
			claimTimeout: config.claimTimeout,
			skipAfter: config.skipAfter,
			isolateAfter: config.isolateAfter,
			// A task starts with one remaining attempt per allowed skip.
			remaining: config.skipAfter,
			lastUpdateTime: void 0
		};
		return new Task(initial);
	}
	get isProcessing() {
		return Boolean(this.claimer);
	}
	get isProcessed() {
		return Boolean(this.processed);
	}
	// attempts beyond skipAfter: drop the task from batches entirely.
	get shouldSkip() {
		return this.attempts > this.skipAfter;
	}
	// attempts beyond isolateAfter: run the task alone in its own batch.
	get shouldIsolate() {
		return this.attempts > this.isolateAfter;
	}
	/**
	 * Releases a claim whose holder exceeded claimTimeout (milliseconds),
	 * counting the expiry as a failed attempt. Mutates in memory only;
	 * the caller is responsible for persisting the change.
	 */
	checkTimeout() {
		if (!this.claimedAt) return;
		const elapsedMicros = _ddd_ts_shape.MicrosecondTimestamp.now().micros - this.claimedAt.micros;
		const timeoutMicros = BigInt(this.claimTimeout) * 1000n;
		if (elapsedMicros <= timeoutMicros) return;
		this.claimedAt = void 0;
		this.claimer = void 0;
		this.attempts += 1;
		this.remaining -= 1;
	}
	/** Deserializes a task, attaching the document's update timestamp. */
	static deserializeWithLastUpdateTime(data, timestamp) {
		return Task.deserialize({
			...data,
			lastUpdateTime: timestamp
		});
	}
	/**
	 * Selects the next claimable batch from `tasks` (assumed ordered):
	 * skipped tasks are dropped, an isolating task always runs alone,
	 * and a lock carried by a blocked or in-flight task restrains every
	 * later task it covers.
	 */
	static batch(tasks) {
		const blockingLocks = [];
		const takenLocks = [];
		const selected = [];
		for (const task of tasks) {
			if (task.shouldSkip) continue;
			if (task.shouldIsolate) {
				// Isolated tasks form a batch of their own.
				if (selected.length === 0) selected.push(task);
				return selected;
			}
			// Blocked either by a lock carried over from a deferred/claimed
			// task, or (non-strictly) by a lock this batch already took.
			const blocked = blockingLocks.some((l) => l.restrains(task.lock)) || takenLocks.some((l) => l.restrains(task.lock, false));
			if (blocked) {
				blockingLocks.push(task.lock);
				continue;
			}
			if (task.isProcessed) continue;
			if (task.isProcessing) {
				blockingLocks.push(task.lock);
				continue;
			}
			selected.push(task);
			takenLocks.push(task.lock);
		}
		return selected;
	}
};
478
+
479
+ //#endregion
221
480
// Public re-export: error signalled when tasks were already enqueued.
exports.AlreadyEnqueuedError = AlreadyEnqueuedError;
222
/**
 * Firestore-backed queue of projection tasks.
 * Documents live under checkpoints/{checkpoint}/shards/{shard}/queue/{eventId}.
 * Optimistic concurrency relies on each document's Firestore updateTime,
 * surfaced on Task as lastUpdateTime (in microseconds).
 */
class FirestoreQueueStore {
    db;
    converter = new store_firestore_1.DefaultConverter();
    collection;
    constructor(db) {
        this.db = db;
        this.collection = db.collection("checkpoints");
    }
    // Firestore Timestamp (seconds + nanoseconds) -> MicrosecondTimestamp.
    timestampToMicroseconds(timestamp) {
        const microseconds = BigInt(timestamp.seconds) * BigInt(1_000_000) +
            BigInt(timestamp.nanoseconds) / BigInt(1_000);
        return new shape_1.MicrosecondTimestamp(microseconds);
    }
    // MicrosecondTimestamp -> Firestore Timestamp (used as update precondition).
    microsecondsToTimestamp(microseconds) {
        const seconds = BigInt(microseconds.micros) / 1000000n;
        const nanoseconds = (BigInt(microseconds.micros) % 1000000n) * 1000n; // Convert to nanoseconds
        return new firestore_1.Timestamp(Number(seconds), Number(nanoseconds));
    }
    /**
     * Creates one queue document per task. create() fails atomically for
     * documents that already exist, which is how duplicate enqueues are
     * detected (gRPC code 6 = ALREADY_EXISTS).
     */
    async enqueue(checkpointId, tasks) {
        const batch = this.collection.firestore.batch();
        for (const task of tasks) {
            const ref = this.queued(checkpointId, task.id);
            batch.create(ref, {
                // Store a live DocumentReference alongside the serialized task.
                ref: this.db.doc(task.cursor.ref),
                ...this.converter.toFirestore(task.serialize()),
            });
        }
        try {
            await batch.commit();
            return [Status.SUCCESS, "Tasks enqueued successfully"];
        }
        catch (err) {
            if (err.code === 6) {
                return [Status.DEFERRED, new AlreadyEnqueuedError()];
            }
            return [Status.DEFERRED, err];
        }
    }
    /**
     * Marks tasks as claimed by `claimer`. Each update is guarded by the
     * task's lastUpdateTime precondition, so a concurrent claim makes the
     * batch commit fail instead of double-claiming.
     */
    async claim(checkpointId, claimer, tasks) {
        const batch = this.collection.firestore.batch();
        for (const task of tasks) {
            const ref = this.queued(checkpointId, task.id);
            batch.update(ref, {
                claimer: claimer.serialize(),
                claimedAt: firestore_1.FieldValue.serverTimestamp(),
                attempts: firestore_1.FieldValue.increment(1),
                remaining: firestore_1.FieldValue.increment(-1),
            }, { lastUpdateTime: this.microsecondsToTimestamp(task.lastUpdateTime) });
        }
        await batch.commit();
    }
    /** Cursor of the newest queued event, or undefined when the queue is empty. */
    async head(checkpointId) {
        const head = this.queue(checkpointId)
            .orderBy("occurredAt", "desc")
            .orderBy("revision", "desc")
            .limit(1);
        const headDoc = (await head.get()).docs[0];
        if (!headDoc) {
            return undefined;
        }
        const headData = this.converter.fromFirestoreSnapshot(headDoc);
        if (!headData) {
            return undefined;
        }
        const headCursor = headDoc
            ? core_1.Cursor.deserialize({
                ref: headData.ref,
                occurredAt: headData.occurredAt,
                revision: headData.revision,
                eventId: headData.id,
            })
            : undefined;
        return headCursor;
    }
    /**
     * Returns up to 100 oldest unprocessed tasks that still have attempts
     * remaining, releasing (and persisting) any expired claims on the way.
     */
    async unprocessed(checkpointId) {
        const query = this.queue(checkpointId)
            .where("processed", "==", false)
            .where("remaining", ">", 0)
            .orderBy("occurredAt", "asc")
            .orderBy("revision", "asc")
            .limit(100);
        const snapshot = await query.get();
        const tasks = snapshot.docs.map((doc) => {
            const data = this.converter.fromFirestoreSnapshot(doc);
            const timestamp = doc.updateTime
                ? this.timestampToMicroseconds(doc.updateTime)
                : undefined;
            return Task.deserializeWithLastUpdateTime(data, timestamp);
        });
        // Check for timeouts and unclaim expired tasks.
        const expiredTasks = [];
        for (const task of tasks) {
            const originalClaimer = task.claimer;
            task.checkTimeout();
            // If the timeout cleared the claimer, persist the release below.
            if (originalClaimer && !task.claimer) {
                expiredTasks.push(task);
            }
        }
        if (expiredTasks.length > 0) {
            const batch = this.collection.firestore.batch();
            for (const task of expiredTasks) {
                const ref = this.queued(checkpointId, task.id);
                // NOTE(review): attempts/remaining are written as absolute values
                // (not FieldValue.increment), so a concurrent writer's counters can
                // be overwritten — confirm this race is acceptable.
                batch.update(ref, {
                    claimer: firestore_1.FieldValue.delete(),
                    claimedAt: firestore_1.FieldValue.delete(),
                    attempts: task.attempts,
                    remaining: task.remaining,
                });
            }
            await batch.commit();
        }
        return tasks;
    }
    /** All tasks currently claimed by `claimer`, oldest first. */
    async claimed(checkpointId, claimer) {
        const query = this.queue(checkpointId)
            .where("claimer", "==", claimer.serialize())
            .orderBy("occurredAt", "asc")
            .orderBy("revision", "asc");
        const snapshot = await query.get();
        return snapshot.docs.map((doc) => {
            const data = this.converter.fromFirestoreSnapshot(doc);
            const timestamp = doc.updateTime
                ? this.timestampToMicroseconds(doc.updateTime)
                : undefined;
            return Task.deserializeWithLastUpdateTime(data, timestamp);
        });
    }
    /** Releases claims without touching the attempt counters. */
    async unclaim(checkpointId, tasks) {
        const batch = this.collection.firestore.batch();
        for (const task of tasks) {
            const ref = this.queued(checkpointId, task.id);
            batch.update(ref, {
                claimer: firestore_1.FieldValue.delete(),
                claimedAt: firestore_1.FieldValue.delete(),
            });
        }
        await batch.commit();
    }
    /**
     * If the task exists, then looks for a processed flag.
     * If not found, check if the cursor is older than the retention time for processed event.
     * If so, consider it processed.
     * Otherwise, consider it missing.
     */
    async isProcessed(checkpointId, cursor) {
        const doc = await this.queued(checkpointId, cursor.eventId).get();
        if (doc.exists) {
            const data = doc.data();
            if (!data)
                throw new Error("No data in queued document");
            if (data.processed === true)
                return TaskState.PROCESSED;
            return TaskState.ENQUEUED;
        }
        // Absent documents older than the retention window were presumably
        // cleaned up after processing; treat them as processed.
        const lastRetention = shape_1.MicrosecondTimestamp.now().sub(RETENTION);
        if (cursor.isOlderThan(lastRetention)) {
            return TaskState.PROCESSED;
        }
        return TaskState.MISSING;
    }
    // checkpoints/{name}/shards
    checkpoint(id) {
        return this.collection.doc(id.name).collection("shards");
    }
    // .../shards/{shard}/queue
    queue(id) {
        return this.checkpoint(id).doc(id.shard()).collection("queue");
    }
    // .../queue/{eventId}
    queued(id, eventId) {
        return this.queue(id).doc(eventId.serialize());
    }
    /** Flags events as processed, inside `context.transaction` when given. */
    async processed(id, eventIds, context = {}) {
        const { transaction: trx } = context;
        if (trx) {
            for (const eventId of eventIds) {
                const ref = this.queued(id, eventId);
                trx.transaction.update(ref, { processed: true });
            }
            return;
        }
        await Promise.all(eventIds.map((eventId) => this.queued(id, eventId).update({
            processed: true,
        })));
        return;
    }
    /** Cursor of the oldest task that still has attempts remaining. */
    async getTailCursor(id) {
        const tail = this.queue(id)
            .where("remaining", ">", 0)
            .orderBy("occurredAt", "asc")
            .orderBy("revision", "asc")
            .limit(1);
        const tailDoc = (await tail.get()).docs[0];
        if (!tailDoc) {
            return undefined;
        }
        const tailData = this.converter.fromFirestoreSnapshot(tailDoc);
        if (!tailData) {
            return undefined;
        }
        const tailCursor = tailDoc
            ? core_1.Cursor.deserialize({
                ref: tailData.ref,
                occurredAt: tailData.occurredAt,
                revision: tailData.revision,
                eventId: tailData.id,
            })
            : undefined;
        return tailCursor;
    }
    /**
     * Deletes old processed documents, keeping a small trail so that
     * getTailCursor and isProcessed keep working near the tail.
     */
    async cleanup(id) {
        const aMonthAgo = shape_1.MicrosecondTimestamp.now().sub(shape_1.MicrosecondTimestamp.WEEK.mult(4));
        const query = this.queue(id)
            .where("remaining", ">", 0)
            .where("occurredAt", "<", aMonthAgo.serialize()) // Only consider events older than 4 weeks
            .orderBy("occurredAt", "asc")
            .orderBy("revision", "asc");
        const MIN_TRAIL = 1; // Keep at least one processed document to maintain the tail cursor
        const TRAIL = MIN_TRAIL + 0; // Extra buffer to optimize isProcessed checks
        const snapshot = await query.get();
        if (snapshot.size < TRAIL)
            return;
        // Only the prefix of consecutive processed documents is deletable.
        // findIndex returns -1 when every doc is processed; slice(0, -1) then
        // conservatively keeps the last document, which is safe.
        const stopper = snapshot.docs.findIndex((doc) => !doc.data().processed);
        const cleanable = snapshot.docs.slice(0, stopper);
        const cleaning = cleanable.slice(0, cleanable.length - TRAIL);
        if (cleaning.length === 0)
            return;
        const batch = this.collection.firestore.batch();
        for (const queued of cleaning)
            batch.delete(queued.ref);
        await batch.commit();
    }
    /** Deletes every document in the queue (maintenance/test helper). */
    async flush(id) {
        const stream = this.queue(id).stream();
        const writer = this.collection.firestore.bulkWriter();
        for await (const queued of stream)
            writer.delete(queued.ref);
        await writer.close();
    }
    /**
     * This method adds a fake processed event to the queue.
     * It is useful for initializing the tail cursor of a new projection, at the
     * same time as the projection's initial state is created, reset, or updated.
     * By default, it will use the current time as the occurredAt timestamp.
     * You can override this by providing a specific timestamp.
     *
     * This ensures that the projection can start processing new events from the
     * correct point in time, avoiding reprocessing of old events.
     */
    async seed(checkpointId) {
        const cursor = new core_1.Cursor({
            ref: "seed",
            occurredAt: shape_1.MicrosecondTimestamp.now(),
            revision: 0,
            eventId: core_1.EventId.generate(),
        });
        const task = new Task({
            id: core_1.EventId.generate(),
            ref: "Seed",
            occurredAt: cursor.occurredAt,
            revision: cursor.revision,
            attempts: 0,
            processed: true,
            claimer: undefined,
            claimedAt: undefined,
            lock: new core_1.Lock({}),
            remaining: 1,
            claimTimeout: 0,
            skipAfter: 0,
            isolateAfter: 0,
            lastUpdateTime: undefined,
        });
        try {
            const serialized = task.serialize();
            const converted = this.converter.toFirestore(serialized);
            await this.queued(checkpointId, task.cursor.eventId).create(converted);
        }
        catch (e) {
            // Ignore if already seeded. Firestore's create() rejects an existing
            // document with gRPC ALREADY_EXISTS (code 6) — it never throws
            // AlreadyEnqueuedError, so the previous `instanceof` check could not
            // match and every re-seed rethrew. Match enqueue()'s detection.
            if (e.code !== 6) {
                throw e;
            }
        }
    }
}
exports.FirestoreQueueStore = FirestoreQueueStore;
510
// Distinct identifier type for queue claimers; a marker subclass that
// inherits EventId's generate/serialize behavior unchanged.
class ClaimerId extends core_1.EventId {
}
exports.ClaimerId = ClaimerId;
513
/**
 * A queued unit of work derived from a stored event fact.
 * Tracks claim/processing state (claimer, claimedAt, processed), retry
 * accounting (attempts, remaining, skipAfter, isolateAfter) and the
 * backing document's update time (lastUpdateTime), which the store uses
 * as an optimistic-concurrency precondition.
 */
class Task extends (0, shape_1.Shape)({
    id: core_1.EventId,
    ref: String,
    occurredAt: shape_1.MicrosecondTimestamp,
    revision: Number,
    attempts: Number,
    processed: Boolean,
    claimer: (0, shape_1.Optional)(String),
    claimedAt: (0, shape_1.Optional)(shape_1.MicrosecondTimestamp),
    lock: core_1.Lock,
    skipAfter: Number,
    remaining: Number,
    isolateAfter: Number,
    claimTimeout: Number,
    lastUpdateTime: (0, shape_1.Optional)(shape_1.MicrosecondTimestamp),
}) {
    /** Position of this task's event in its stream. */
    get cursor() {
        return new core_1.Cursor({
            ref: this.ref,
            occurredAt: this.occurredAt,
            revision: this.revision,
            eventId: this.id,
        });
    }
    /** Builds a fresh, unclaimed task for `fact` using the queue `config`. */
    static new(fact, config) {
        return new Task({
            id: fact.id,
            attempts: 0,
            claimer: undefined,
            processed: false,
            claimedAt: undefined,
            lock: config.lock,
            claimTimeout: config.claimTimeout,
            skipAfter: config.skipAfter,
            isolateAfter: config.isolateAfter,
            // A task starts with one remaining attempt per allowed skip.
            remaining: config.skipAfter,
            ref: fact.ref,
            revision: fact.revision,
            occurredAt: fact.occurredAt,
            lastUpdateTime: undefined,
        });
    }
    get isProcessing() {
        return !!this.claimer;
    }
    get isProcessed() {
        return !!this.processed;
    }
    // attempts beyond skipAfter: drop the task from batches entirely.
    get shouldSkip() {
        return this.attempts > this.skipAfter;
    }
    // attempts beyond isolateAfter: run the task alone in its own batch.
    get shouldIsolate() {
        return this.attempts > this.isolateAfter;
    }
    /**
     * Releases a claim whose holder exceeded claimTimeout (milliseconds),
     * counting the expiry as a failed attempt. Mutates in memory only;
     * the caller is responsible for persisting the change.
     */
    checkTimeout() {
        if (!this.claimedAt)
            return;
        const now = shape_1.MicrosecondTimestamp.now();
        const elapsedMicros = now.micros - this.claimedAt.micros;
        const timeoutMicros = BigInt(this.claimTimeout) * 1000n; // Convert ms to microseconds
        if (elapsedMicros > timeoutMicros) {
            this.claimedAt = undefined;
            this.claimer = undefined;
            this.attempts += 1;
            this.remaining -= 1;
        }
    }
    /** Deserializes a task, attaching the document's update timestamp. */
    static deserializeWithLastUpdateTime(data, timestamp) {
        const task = Task.deserialize({
            ...data,
            lastUpdateTime: timestamp,
        });
        return task;
    }
    /**
     * Selects the next claimable batch from `tasks` (assumed ordered):
     * skipped tasks are dropped, an isolating task always runs alone, and
     * a lock carried by a blocked or in-flight task restrains every later
     * task it covers.
     */
    static batch(tasks) {
        const locks = [];
        const batchLocks = [];
        const batch = [];
        for (const task of tasks) {
            if (task.shouldSkip) {
                continue;
            }
            if (task.shouldIsolate) {
                // Isolated tasks form a batch of their own.
                if (batch.length > 0) {
                    return batch;
                }
                batch.push(task);
                return batch;
            }
            // Blocked by a lock carried over from a deferred/claimed task.
            if (locks.some((l) => l.restrains(task.lock))) {
                locks.push(task.lock);
                continue;
            }
            // Blocked (non-strictly) by a lock this batch already took.
            if (batchLocks.some((l) => l.restrains(task.lock, false))) {
                locks.push(task.lock);
                continue;
            }
            if (task.isProcessed) {
                continue;
            }
            if (task.isProcessing) {
                locks.push(task.lock);
                continue;
            }
            batch.push(task);
            batchLocks.push(task.lock);
        }
        return batch;
    }
}
exports.Task = Task;
637
- //# sourceMappingURL=firestore.projector.js.map
482
// Public API of the bundled module.
exports.FirestoreProjector = FirestoreProjector;
exports.FirestoreQueueStore = FirestoreQueueStore;
exports.Task = Task;