@event-driven-io/emmett-postgresql 0.20.2-alpha2 → 0.20.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +1201 -62
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +1201 -62
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
- package/dist/cli.cjs +0 -2
- package/dist/cli.cjs.map +0 -1
- package/dist/cli.d.cts +0 -5
- package/dist/cli.d.ts +0 -5
- package/dist/cli.js +0 -2
- package/dist/cli.js.map +0 -1
package/dist/index.cjs
CHANGED
@@ -1,9 +1,813 @@
-"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; } var _class; var _class2; var _class3
-
+"use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; } var _class; var _class2; var _class3;// src/eventStore/postgreSQLEventStore.ts
+
+
+var _dumbo = require('@event-driven-io/dumbo');
+
+// ../emmett/dist/chunk-AEEEXE2R.js
+var isNumber = (val) => typeof val === "number" && val === val;
+var isString = (val) => typeof val === "string";
+var isErrorConstructor = (expect) => {
+  return typeof expect === "function" && expect.prototype && // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
+  expect.prototype.constructor === expect;
+};
+var EmmettError = class _EmmettError extends Error {
+
+  constructor(options) {
+    const errorCode = options && typeof options === "object" && "errorCode" in options ? options.errorCode : isNumber(options) ? options : 500;
+    const message = options && typeof options === "object" && "message" in options ? options.message : isString(options) ? options : `Error with status code '${errorCode}' ocurred during Emmett processing`;
+    super(message);
+    this.errorCode = errorCode;
+    Object.setPrototypeOf(this, _EmmettError.prototype);
+  }
+};
+var ConcurrencyError = class _ConcurrencyError extends EmmettError {
+  constructor(current, expected, message) {
+    super({
+      errorCode: 412,
+      message: _nullishCoalesce(message, () => ( `Expected version ${expected.toString()} does not match current ${_optionalChain([current, 'optionalAccess', _2 => _2.toString, 'call', _3 => _3()])}`))
+    });
+    this.current = current;
+    this.expected = expected;
+    Object.setPrototypeOf(this, _ConcurrencyError.prototype);
+  }
+};
|
|
34
|
+
|
|
35
|
+
// ../emmett/dist/index.js
|
|
36
|
+
var _uuid = require('uuid');
|
|
37
|
+
var _webstreamspolyfill = require('web-streams-polyfill');
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
var _asyncretry = require('async-retry'); var _asyncretry2 = _interopRequireDefault(_asyncretry);
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
+var event = (type, data, metadata) => {
+  return {
+    type,
+    data,
+    metadata
+  };
+};
+var STREAM_EXISTS = "STREAM_EXISTS";
+var STREAM_DOES_NOT_EXIST = "STREAM_DOES_NOT_EXIST";
+var NO_CONCURRENCY_CHECK = "NO_CONCURRENCY_CHECK";
+var matchesExpectedVersion = (current, expected, defaultVersion) => {
+  if (expected === NO_CONCURRENCY_CHECK) return true;
+  if (expected == STREAM_DOES_NOT_EXIST) return current === defaultVersion;
+  if (expected == STREAM_EXISTS) return current !== defaultVersion;
+  return current === expected;
+};
+var assertExpectedVersionMatchesCurrent = (current, expected, defaultVersion) => {
+  expected ??= NO_CONCURRENCY_CHECK;
+  if (!matchesExpectedVersion(current, expected, defaultVersion))
+    throw new ExpectedVersionConflictError(current, expected);
+};
+var ExpectedVersionConflictError = class _ExpectedVersionConflictError extends ConcurrencyError {
+  constructor(current, expected) {
+    super(_optionalChain([current, 'optionalAccess', _4 => _4.toString, 'call', _5 => _5()]), _optionalChain([expected, 'optionalAccess', _6 => _6.toString, 'call', _7 => _7()]));
+    Object.setPrototypeOf(this, _ExpectedVersionConflictError.prototype);
+  }
+};
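For orientation (not part of the published diff), a minimal sketch of how the expected-version constants above behave; it assumes they are re-exported from the core @event-driven-io/emmett package:

    import {
      matchesExpectedVersion,
      NO_CONCURRENCY_CHECK,
      STREAM_DOES_NOT_EXIST,
      STREAM_EXISTS,
    } from '@event-driven-io/emmett'; // assumed export surface

    const defaultVersion = 0n; // PostgreSQLEventStoreDefaultStreamVersion later in this file

    matchesExpectedVersion(3n, NO_CONCURRENCY_CHECK, defaultVersion);  // true: no check at all
    matchesExpectedVersion(0n, STREAM_DOES_NOT_EXIST, defaultVersion); // true: stream still at the default version
    matchesExpectedVersion(3n, STREAM_EXISTS, defaultVersion);         // true: stream has moved past the default
    matchesExpectedVersion(3n, 2n, defaultVersion);                    // false: assertExpectedVersionMatchesCurrent would throw ExpectedVersionConflictError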
|
|
82
|
+
var notifyAboutNoActiveReadersStream = (onNoActiveReaderCallback, options = {}) => new NotifyAboutNoActiveReadersStream(onNoActiveReaderCallback, options);
|
|
83
|
+
var NotifyAboutNoActiveReadersStream = (_class = class extends _webstreamspolyfill.TransformStream {
|
|
84
|
+
constructor(onNoActiveReaderCallback, options = {}) {
|
|
85
|
+
super({
|
|
86
|
+
cancel: (reason) => {
|
|
87
|
+
console.log("Stream was canceled. Reason:", reason);
|
|
88
|
+
this.stopChecking();
|
|
89
|
+
}
|
|
90
|
+
});_class.prototype.__init.call(this);_class.prototype.__init2.call(this);;
|
|
91
|
+
this.onNoActiveReaderCallback = onNoActiveReaderCallback;
|
|
92
|
+
this.streamId = _nullishCoalesce(_optionalChain([options, 'optionalAccess', _8 => _8.streamId]), () => ( _uuid.v4.call(void 0, )));
|
|
93
|
+
this.onNoActiveReaderCallback = onNoActiveReaderCallback;
|
|
94
|
+
this.startChecking(_nullishCoalesce(_optionalChain([options, 'optionalAccess', _9 => _9.intervalCheckInMs]), () => ( 20)));
|
|
95
|
+
}
|
|
96
|
+
__init() {this.checkInterval = null}
|
|
97
|
+
|
|
98
|
+
__init2() {this._isStopped = false}
|
|
99
|
+
get hasActiveSubscribers() {
|
|
100
|
+
return !this._isStopped;
|
|
101
|
+
}
|
|
102
|
+
startChecking(interval) {
|
|
103
|
+
this.checkInterval = setInterval(() => {
|
|
104
|
+
this.checkNoActiveReader();
|
|
105
|
+
}, interval);
|
|
106
|
+
}
|
|
107
|
+
stopChecking() {
|
|
108
|
+
if (!this.checkInterval) return;
|
|
109
|
+
clearInterval(this.checkInterval);
|
|
110
|
+
this.checkInterval = null;
|
|
111
|
+
this._isStopped = true;
|
|
112
|
+
this.onNoActiveReaderCallback(this);
|
|
113
|
+
}
|
|
114
|
+
checkNoActiveReader() {
|
|
115
|
+
if (!this.readable.locked && !this._isStopped) {
|
|
116
|
+
this.stopChecking();
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
}, _class);
|
|
120
|
+
var deepEquals = (left, right) => {
|
|
121
|
+
if (isEquatable(left)) {
|
|
122
|
+
return left.equals(right);
|
|
123
|
+
}
|
|
124
|
+
if (Array.isArray(left)) {
|
|
125
|
+
return Array.isArray(right) && left.length === right.length && left.every((val, index) => deepEquals(val, right[index]));
|
|
126
|
+
}
|
|
127
|
+
if (typeof left !== "object" || typeof right !== "object" || left === null || right === null) {
|
|
128
|
+
return left === right;
|
|
129
|
+
}
|
|
130
|
+
if (Array.isArray(right)) return false;
|
|
131
|
+
const keys1 = Object.keys(left);
|
|
132
|
+
const keys2 = Object.keys(right);
|
|
133
|
+
if (keys1.length !== keys2.length || !keys1.every((key) => keys2.includes(key)))
|
|
134
|
+
return false;
|
|
135
|
+
for (const key in left) {
|
|
136
|
+
if (left[key] instanceof Function && right[key] instanceof Function)
|
|
137
|
+
continue;
|
|
138
|
+
const isEqual = deepEquals(left[key], right[key]);
|
|
139
|
+
if (!isEqual) {
|
|
140
|
+
return false;
|
|
141
|
+
}
|
|
142
|
+
}
|
|
143
|
+
return true;
|
|
144
|
+
};
|
|
145
|
+
var isEquatable = (left) => {
|
|
146
|
+
return left && typeof left === "object" && "equals" in left && typeof left["equals"] === "function";
|
|
147
|
+
};
+var asyncRetry = async (fn, opts) => {
+  if (opts === void 0 || opts.retries === 0) return fn();
+  return _asyncretry2.default.call(void 0,
+    async (bail) => {
+      try {
+        return await fn();
+      } catch (error2) {
+        if (_optionalChain([opts, 'optionalAccess', _10 => _10.shouldRetryError]) && !opts.shouldRetryError(error2)) {
+          bail(error2);
+        }
+        throw error2;
+      }
+    },
+    _nullishCoalesce(opts, () => ( { retries: 0 }))
+  );
+};
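The asyncRetry helper above forwards to the async-retry package and bails out early when shouldRetryError rejects the error. A minimal sketch of calling it (fetchSomething is hypothetical):

    declare function fetchSomething(): Promise<string>;

    const value = await asyncRetry(fetchSomething, {
      retries: 3,
      minTimeout: 25,
      // returning false makes the wrapper bail immediately instead of retrying
      shouldRetryError: (error) => !(error instanceof TypeError),
    });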
+var projection = (definition) => definition;
+var ParseError = class extends Error {
+  constructor(text) {
+    super(`Cannot parse! ${text}`);
+  }
+};
+var JSONParser = {
+  stringify: (value, options) => {
+    return JSON.stringify(
+      _optionalChain([options, 'optionalAccess', _11 => _11.map]) ? options.map(value) : value,
+      //TODO: Consider adding support to DateTime and adding specific format to mark that's a bigint
+      // eslint-disable-next-line @typescript-eslint/no-unsafe-return
+      (_, v) => typeof v === "bigint" ? v.toString() : v
+    );
+  },
+  parse: (text, options) => {
+    const parsed = JSON.parse(text, _optionalChain([options, 'optionalAccess', _12 => _12.reviver]));
+    if (_optionalChain([options, 'optionalAccess', _13 => _13.typeCheck]) && !_optionalChain([options, 'optionalAccess', _14 => _14.typeCheck, 'call', _15 => _15(parsed)]))
+      throw new ParseError(text);
+    return _optionalChain([options, 'optionalAccess', _16 => _16.map]) ? options.map(parsed) : parsed;
+  }
+};
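JSONParser above serializes bigint values as strings and lets parse() optionally validate and map the result. A small sketch; the typeCheck and map callbacks are written loosely since the bundled code is untyped:

    const raw = JSONParser.stringify({ position: 123n }); // '{"position":"123"}'

    const parsed = JSONParser.parse(raw, {
      typeCheck: (value: unknown) => typeof value === 'object' && value !== null, // ParseError is thrown when this returns false
      map: (value: any) => ({ ...value, position: BigInt(value.position) }),
    }); // { position: 123n }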
|
|
186
|
+
var filter = (filter2) => new (0, _webstreamspolyfill.TransformStream)({
|
|
187
|
+
transform(chunk, controller) {
|
|
188
|
+
if (filter2(chunk)) {
|
|
189
|
+
controller.enqueue(chunk);
|
|
190
|
+
}
|
|
191
|
+
}
|
|
192
|
+
});
|
|
193
|
+
var map = (map2) => new (0, _webstreamspolyfill.TransformStream)({
|
|
194
|
+
transform(chunk, controller) {
|
|
195
|
+
controller.enqueue(map2(chunk));
|
|
196
|
+
}
|
|
197
|
+
});
|
|
198
|
+
var reduce = (reducer, initialValue) => new ReduceTransformStream(reducer, initialValue);
|
|
199
|
+
var ReduceTransformStream = class extends _webstreamspolyfill.TransformStream {
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
constructor(reducer, initialValue) {
|
|
203
|
+
super({
|
|
204
|
+
transform: (chunk) => {
|
|
205
|
+
this.accumulator = this.reducer(this.accumulator, chunk);
|
|
206
|
+
},
|
|
207
|
+
flush: (controller) => {
|
|
208
|
+
controller.enqueue(this.accumulator);
|
|
209
|
+
controller.terminate();
|
|
210
|
+
}
|
|
211
|
+
});
|
|
212
|
+
this.accumulator = initialValue;
|
|
213
|
+
this.reducer = reducer;
|
|
214
|
+
}
|
|
215
|
+
};
|
|
216
|
+
var retryStream = (createSourceStream, handleChunk2, retryOptions = { forever: true, minTimeout: 25 }) => new (0, _webstreamspolyfill.TransformStream)({
|
|
217
|
+
start(controller) {
|
|
218
|
+
asyncRetry(
|
|
219
|
+
() => onRestream(createSourceStream, handleChunk2, controller),
|
|
220
|
+
retryOptions
|
|
221
|
+
).catch((error2) => {
|
|
222
|
+
controller.error(error2);
|
|
223
|
+
});
|
|
224
|
+
}
|
|
225
|
+
});
|
|
226
|
+
var onRestream = async (createSourceStream, handleChunk2, controller) => {
|
|
227
|
+
const sourceStream = createSourceStream();
|
|
228
|
+
const reader = sourceStream.getReader();
|
|
229
|
+
try {
|
|
230
|
+
let done;
|
|
231
|
+
do {
|
|
232
|
+
const result = await reader.read();
|
|
233
|
+
done = result.done;
|
|
234
|
+
await handleChunk2(result, controller);
|
|
235
|
+
if (done) {
|
|
236
|
+
controller.terminate();
|
|
237
|
+
}
|
|
238
|
+
} while (!done);
|
|
239
|
+
} finally {
|
|
240
|
+
reader.releaseLock();
|
|
241
|
+
}
|
|
242
|
+
};
|
|
243
|
+
var skip = (limit) => new SkipTransformStream(limit);
|
|
244
|
+
var SkipTransformStream = (_class2 = class extends _webstreamspolyfill.TransformStream {
|
|
245
|
+
__init3() {this.count = 0}
|
|
246
|
+
|
|
247
|
+
constructor(skip2) {
|
|
248
|
+
super({
|
|
249
|
+
transform: (chunk, controller) => {
|
|
250
|
+
this.count++;
|
|
251
|
+
if (this.count > this.skip) {
|
|
252
|
+
controller.enqueue(chunk);
|
|
253
|
+
}
|
|
254
|
+
}
|
|
255
|
+
});_class2.prototype.__init3.call(this);;
|
|
256
|
+
this.skip = skip2;
|
|
257
|
+
}
|
|
258
|
+
}, _class2);
|
|
259
|
+
var stopAfter = (stopCondition) => new (0, _webstreamspolyfill.TransformStream)({
|
|
260
|
+
transform(chunk, controller) {
|
|
261
|
+
controller.enqueue(chunk);
|
|
262
|
+
if (stopCondition(chunk)) {
|
|
263
|
+
controller.terminate();
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
});
|
|
267
|
+
var stopOn = (stopCondition) => new (0, _webstreamspolyfill.TransformStream)({
|
|
268
|
+
async transform(chunk, controller) {
|
|
269
|
+
if (!stopCondition(chunk)) {
|
|
270
|
+
controller.enqueue(chunk);
|
|
271
|
+
return;
|
|
272
|
+
}
|
|
273
|
+
await Promise.resolve();
|
|
274
|
+
controller.terminate();
|
|
275
|
+
}
|
|
276
|
+
});
|
|
277
|
+
var take = (limit) => new TakeTransformStream(limit);
|
|
278
|
+
var TakeTransformStream = (_class3 = class extends _webstreamspolyfill.TransformStream {
|
|
279
|
+
__init4() {this.count = 0}
|
|
280
|
+
|
|
281
|
+
constructor(limit) {
|
|
282
|
+
super({
|
|
283
|
+
transform: (chunk, controller) => {
|
|
284
|
+
if (this.count < this.limit) {
|
|
285
|
+
this.count++;
|
|
286
|
+
controller.enqueue(chunk);
|
|
287
|
+
} else {
|
|
288
|
+
controller.terminate();
|
|
289
|
+
}
|
|
290
|
+
}
|
|
291
|
+
});_class3.prototype.__init4.call(this);;
|
|
292
|
+
this.limit = limit;
|
|
293
|
+
}
|
|
294
|
+
}, _class3);
|
|
295
|
+
var waitAtMost = (waitTimeInMs) => new (0, _webstreamspolyfill.TransformStream)({
|
|
296
|
+
start(controller) {
|
|
297
|
+
const timeoutId = setTimeout(() => {
|
|
298
|
+
controller.terminate();
|
|
299
|
+
}, waitTimeInMs);
|
|
300
|
+
const originalTerminate = controller.terminate.bind(controller);
|
|
301
|
+
controller.terminate = () => {
|
|
302
|
+
clearTimeout(timeoutId);
|
|
303
|
+
originalTerminate();
|
|
304
|
+
};
|
|
305
|
+
},
|
|
306
|
+
transform(chunk, controller) {
|
|
307
|
+
controller.enqueue(chunk);
|
|
308
|
+
}
|
|
309
|
+
});
+var streamTransformations = {
+  filter,
+  take,
+  TakeTransformStream,
+  skip,
+  SkipTransformStream,
+  map,
+  notifyAboutNoActiveReadersStream,
+  NotifyAboutNoActiveReadersStream,
+  reduce,
+  ReduceTransformStream,
+  retry: retryStream,
+  stopAfter,
+  stopOn,
+  waitAtMost
+};
+var { retry: retry2 } = streamTransformations;
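The streamTransformations object collects the TransformStream helpers defined above (built on web-streams-polyfill). A sketch of composing a few of them; it assumes the source ReadableStream comes from a compatible streams implementation so that pipeThrough accepts the transforms:

    const { filter, map, take } = streamTransformations;

    declare const numbersStream: ReadableStream<number>; // hypothetical source stream

    const doubledEvens = numbersStream
      .pipeThrough(filter((n: number) => n % 2 === 0))
      .pipeThrough(map((n: number) => n * 2))
      .pipeThrough(take(3)); // terminates after three chunks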
|
|
327
|
+
var AssertionError = class extends Error {
|
|
328
|
+
constructor(message) {
|
|
329
|
+
super(message);
|
|
330
|
+
}
|
|
331
|
+
};
|
|
332
|
+
var assertFails = (message) => {
|
|
333
|
+
throw new AssertionError(_nullishCoalesce(message, () => ( "That should not ever happened, right?")));
|
|
334
|
+
};
+var assertDeepEqual = (actual, expected, message) => {
+  if (!deepEquals(actual, expected))
+    throw new AssertionError(
+      _nullishCoalesce(message, () => ( `subObj:
+${JSONParser.stringify(expected)}
 is not equal to
-${
-
-
+${JSONParser.stringify(actual)}`))
+    );
+};
|
|
344
|
+
function assertTrue(condition, message) {
|
|
345
|
+
if (!condition) throw new AssertionError(_nullishCoalesce(message, () => ( `Condition is false`)));
|
|
346
|
+
}
|
|
347
|
+
function assertOk(obj, message) {
|
|
348
|
+
if (!obj) throw new AssertionError(_nullishCoalesce(message, () => ( `Condition is not truthy`)));
|
|
349
|
+
}
|
|
350
|
+
function assertEqual(expected, actual, message) {
|
|
351
|
+
if (expected !== actual)
|
|
352
|
+
throw new AssertionError(
|
|
353
|
+
`${_nullishCoalesce(message, () => ( "Objects are not equal"))}:
|
|
354
|
+
Expected: ${JSONParser.stringify(expected)}
|
|
355
|
+
Actual:${JSONParser.stringify(actual)}`
|
|
356
|
+
);
|
|
357
|
+
}
|
|
358
|
+
function assertNotEqual(obj, other, message) {
|
|
359
|
+
if (obj === other)
|
|
360
|
+
throw new AssertionError(
|
|
361
|
+
_nullishCoalesce(message, () => ( `Objects are equal: ${JSONParser.stringify(obj)}`))
|
|
362
|
+
);
|
|
363
|
+
}
|
|
364
|
+
function assertIsNotNull(result) {
|
|
365
|
+
assertNotEqual(result, null);
|
|
366
|
+
assertOk(result);
|
|
367
|
+
}
|
|
368
|
+
var assertThatArray = (array) => {
|
|
369
|
+
return {
|
|
370
|
+
isEmpty: () => assertEqual(array.length, 0),
|
|
371
|
+
isNotEmpty: () => assertNotEqual(array.length, 0),
|
|
372
|
+
hasSize: (length) => assertEqual(array.length, length),
|
|
373
|
+
containsElements: (...other) => {
|
|
374
|
+
assertTrue(other.every((ts) => other.some((o) => deepEquals(ts, o))));
|
|
375
|
+
},
|
|
376
|
+
containsExactlyInAnyOrder: (...other) => {
|
|
377
|
+
assertEqual(array.length, other.length);
|
|
378
|
+
assertTrue(array.every((ts) => other.some((o) => deepEquals(ts, o))));
|
|
379
|
+
},
|
|
380
|
+
containsExactlyInAnyOrderElementsOf: (other) => {
|
|
381
|
+
assertEqual(array.length, other.length);
|
|
382
|
+
assertTrue(array.every((ts) => other.some((o) => deepEquals(ts, o))));
|
|
383
|
+
},
|
|
384
|
+
containsExactlyElementsOf: (other) => {
|
|
385
|
+
assertEqual(array.length, other.length);
|
|
386
|
+
for (let i = 0; i < array.length; i++) {
|
|
387
|
+
assertTrue(deepEquals(array[i], other[i]));
|
|
388
|
+
}
|
|
389
|
+
},
|
|
390
|
+
containsExactly: (elem) => {
|
|
391
|
+
assertEqual(array.length, 1);
|
|
392
|
+
assertTrue(deepEquals(array[0], elem));
|
|
393
|
+
},
|
|
394
|
+
contains: (elem) => {
|
|
395
|
+
assertTrue(array.some((a) => deepEquals(a, elem)));
|
|
396
|
+
},
|
|
397
|
+
containsOnlyOnceElementsOf: (other) => {
|
|
398
|
+
assertTrue(
|
|
399
|
+
other.map((o) => array.filter((a) => deepEquals(a, o)).length).filter((a) => a === 1).length === other.length
|
|
400
|
+
);
|
|
401
|
+
},
|
|
402
|
+
containsAnyOf: (...other) => {
|
|
403
|
+
assertTrue(array.some((a) => other.some((o) => deepEquals(a, o))));
|
|
404
|
+
},
|
|
405
|
+
allMatch: (matches) => {
|
|
406
|
+
assertTrue(array.every(matches));
|
|
407
|
+
},
|
|
408
|
+
anyMatches: (matches) => {
|
|
409
|
+
assertTrue(array.some(matches));
|
|
410
|
+
},
|
|
411
|
+
allMatchAsync: async (matches) => {
|
|
412
|
+
for (const item of array) {
|
|
413
|
+
assertTrue(await matches(item));
|
|
414
|
+
}
|
|
415
|
+
}
|
|
416
|
+
};
|
|
417
|
+
};
|
|
418
|
+
|
|
419
|
+
// src/eventStore/postgreSQLEventStore.ts
|
|
420
|
+
require('pg');
|
|
421
|
+
|
|
422
|
+
// src/eventStore/projections/index.ts
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
// src/eventStore/projections/pongo/pongoProjectionSpec.ts
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
var _pongo = require('@event-driven-io/pongo');
|
|
430
|
+
var withCollection = (handle, options) => {
|
|
431
|
+
const { pool, connectionString, inDatabase, inCollection } = options;
|
|
432
|
+
return pool.withConnection(async (connection) => {
|
|
433
|
+
const pongo = _pongo.pongoClient.call(void 0, connectionString, {
|
|
434
|
+
connectionOptions: { connection }
|
|
435
|
+
});
|
|
436
|
+
try {
|
|
437
|
+
const collection = pongo.db(inDatabase).collection(inCollection);
|
|
438
|
+
return handle(collection);
|
|
439
|
+
} finally {
|
|
440
|
+
await pongo.close();
|
|
441
|
+
}
|
|
442
|
+
});
|
|
443
|
+
};
|
|
444
|
+
var withoutIdAndVersion = (doc) => {
|
|
445
|
+
const { _id, _version, ...without } = doc;
|
|
446
|
+
return without;
|
|
447
|
+
};
|
|
448
|
+
var assertDocumentsEqual = (actual, expected) => {
|
|
449
|
+
if ("_id" in expected)
|
|
450
|
+
assertEqual(
|
|
451
|
+
expected._id,
|
|
452
|
+
actual._id,
|
|
453
|
+
// eslint-disable-next-line @typescript-eslint/restrict-template-expressions
|
|
454
|
+
`Document ids are not matching! Expected: ${expected._id}, actual: ${actual._id}`
|
|
455
|
+
);
|
|
456
|
+
return assertDeepEqual(
|
|
457
|
+
withoutIdAndVersion(actual),
|
|
458
|
+
withoutIdAndVersion(expected)
|
|
459
|
+
);
|
|
460
|
+
};
|
|
461
|
+
var documentExists = (document, options) => (assertOptions) => withCollection(
|
|
462
|
+
async (collection) => {
|
|
463
|
+
const result = await collection.findOne(
|
|
464
|
+
"withId" in options ? { _id: options.withId } : options.matchingFilter
|
|
465
|
+
);
|
|
466
|
+
assertIsNotNull(result);
|
|
467
|
+
assertDocumentsEqual(result, document);
|
|
468
|
+
},
|
|
469
|
+
{ ...options, ...assertOptions }
|
|
470
|
+
);
|
|
471
|
+
var documentsAreTheSame = (documents, options) => (assertOptions) => withCollection(
|
|
472
|
+
async (collection) => {
|
|
473
|
+
const result = await collection.find(
|
|
474
|
+
"withId" in options ? { _id: options.withId } : options.matchingFilter
|
|
475
|
+
);
|
|
476
|
+
assertEqual(
|
|
477
|
+
documents.length,
|
|
478
|
+
result.length,
|
|
479
|
+
"Different Documents Count than expected"
|
|
480
|
+
);
|
|
481
|
+
for (let i = 0; i < documents.length; i++) {
|
|
482
|
+
assertThatArray(result).contains(documents[i]);
|
|
483
|
+
}
|
|
484
|
+
},
|
|
485
|
+
{ ...options, ...assertOptions }
|
|
486
|
+
);
|
|
487
|
+
var documentsMatchingHaveCount = (expectedCount, options) => (assertOptions) => withCollection(
|
|
488
|
+
async (collection) => {
|
|
489
|
+
const result = await collection.find(
|
|
490
|
+
"withId" in options ? { _id: options.withId } : options.matchingFilter
|
|
491
|
+
);
|
|
492
|
+
assertEqual(
|
|
493
|
+
expectedCount,
|
|
494
|
+
result.length,
|
|
495
|
+
"Different Documents Count than expected"
|
|
496
|
+
);
|
|
497
|
+
},
|
|
498
|
+
{ ...options, ...assertOptions }
|
|
499
|
+
);
|
|
500
|
+
var documentMatchingExists = (options) => (assertOptions) => withCollection(
|
|
501
|
+
async (collection) => {
|
|
502
|
+
const result = await collection.find(
|
|
503
|
+
"withId" in options ? { _id: options.withId } : options.matchingFilter
|
|
504
|
+
);
|
|
505
|
+
assertThatArray(result).isNotEmpty();
|
|
506
|
+
},
|
|
507
|
+
{ ...options, ...assertOptions }
|
|
508
|
+
);
|
|
509
|
+
var documentDoesNotExist = (options) => (assertOptions) => withCollection(
|
|
510
|
+
async (collection) => {
|
|
511
|
+
const result = await collection.findOne(
|
|
512
|
+
"withId" in options ? { _id: options.withId } : options.matchingFilter
|
|
513
|
+
);
|
|
514
|
+
assertIsNotNull(result);
|
|
515
|
+
},
|
|
516
|
+
{ ...options, ...assertOptions }
|
|
517
|
+
);
|
|
518
|
+
var expectPongoDocuments = {
|
|
519
|
+
fromCollection: (collectionName) => {
|
|
520
|
+
return {
|
|
521
|
+
withId: (id) => {
|
|
522
|
+
return {
|
|
523
|
+
toBeEqual: (document) => documentExists(document, {
|
|
524
|
+
withId: id,
|
|
525
|
+
inCollection: collectionName
|
|
526
|
+
}),
|
|
527
|
+
toExist: () => documentMatchingExists({
|
|
528
|
+
withId: id,
|
|
529
|
+
inCollection: collectionName
|
|
530
|
+
}),
|
|
531
|
+
notToExist: () => documentDoesNotExist({
|
|
532
|
+
withId: id,
|
|
533
|
+
inCollection: collectionName
|
|
534
|
+
})
|
|
535
|
+
};
|
|
536
|
+
},
|
|
537
|
+
matching: (filter2) => {
|
|
538
|
+
return {
|
|
539
|
+
toBeTheSame: (documents) => documentsAreTheSame(documents, {
|
|
540
|
+
matchingFilter: filter2,
|
|
541
|
+
inCollection: collectionName
|
|
542
|
+
}),
|
|
543
|
+
toHaveCount: (expectedCount) => documentsMatchingHaveCount(expectedCount, {
|
|
544
|
+
matchingFilter: filter2,
|
|
545
|
+
inCollection: collectionName
|
|
546
|
+
}),
|
|
547
|
+
toExist: () => documentMatchingExists({
|
|
548
|
+
matchingFilter: filter2,
|
|
549
|
+
inCollection: collectionName
|
|
550
|
+
}),
|
|
551
|
+
notToExist: () => documentDoesNotExist({
|
|
552
|
+
matchingFilter: filter2,
|
|
553
|
+
inCollection: collectionName
|
|
554
|
+
})
|
|
555
|
+
};
|
|
556
|
+
}
|
|
557
|
+
};
|
|
558
|
+
}
|
|
559
|
+
};
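expectPongoDocuments above builds assertion functions that are later invoked with the pool and connection string (see PostgreSQLProjectionSpec further down). A sketch with hypothetical collection and id values:

    const assertSummary = expectPongoDocuments
      .fromCollection('shoppingCartSummaries')
      .withId('shopping_cart-123')
      .toBeEqual({ itemsCount: 2 });

    // Later, typically inside the spec's `.then(...)` step:
    // await assertSummary({ pool, connectionString });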
|
|
560
|
+
|
|
561
|
+
// src/eventStore/projections/pongo/projections.ts
|
|
562
|
+
|
|
563
|
+
|
|
564
|
+
|
|
565
|
+
var pongoProjection = ({
|
|
566
|
+
handle,
|
|
567
|
+
canHandle
|
|
568
|
+
}) => postgreSQLProjection({
|
|
569
|
+
canHandle,
|
|
570
|
+
handle: async (events, context) => {
|
|
571
|
+
const { connectionString, client } = context;
|
|
572
|
+
const pongo = _pongo.pongoClient.call(void 0, connectionString, {
|
|
573
|
+
connectionOptions: { client }
|
|
574
|
+
});
|
|
575
|
+
await handle(events, {
|
|
576
|
+
...context,
|
|
577
|
+
pongo
|
|
578
|
+
});
|
|
579
|
+
}
|
|
580
|
+
});
|
|
581
|
+
var pongoMultiStreamProjection = (options) => {
|
|
582
|
+
const { collectionName, getDocumentId, canHandle } = options;
|
|
583
|
+
return pongoProjection({
|
|
584
|
+
handle: async (events, { pongo }) => {
|
|
585
|
+
const collection = pongo.db().collection(collectionName);
|
|
586
|
+
for (const event2 of events) {
|
|
587
|
+
await collection.handle(getDocumentId(event2), async (document) => {
|
|
588
|
+
return "initialState" in options ? await options.evolve(
|
|
589
|
+
_nullishCoalesce(document, () => ( options.initialState())),
|
|
590
|
+
event2
|
|
591
|
+
) : await options.evolve(
|
|
592
|
+
document,
|
|
593
|
+
event2
|
|
594
|
+
);
|
|
595
|
+
});
|
|
596
|
+
}
|
|
597
|
+
},
|
|
598
|
+
canHandle
|
|
599
|
+
});
|
|
600
|
+
};
+var pongoSingleStreamProjection = (options) => {
+  return pongoMultiStreamProjection({
+    ...options,
+    getDocumentId: (event2) => event2.metadata.streamName
+  });
+};
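A sketch of defining a single-stream Pongo projection with the helper above; the event and document shapes are hypothetical, and the exact generic typing of the published API is assumed:

    type ProductItemAdded = { type: 'ProductItemAdded'; data: { quantity: number } };

    const shoppingCartSummary = pongoSingleStreamProjection({
      collectionName: 'shoppingCartSummaries',
      canHandle: ['ProductItemAdded'],
      initialState: () => ({ itemsCount: 0 }),
      evolve: (document: { itemsCount: number }, event: ProductItemAdded) => ({
        itemsCount: document.itemsCount + event.data.quantity,
      }),
    });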
|
|
607
|
+
|
|
608
|
+
// src/eventStore/projections/postgresProjectionSpec.ts
|
|
609
|
+
|
|
610
|
+
|
|
611
|
+
|
|
612
|
+
|
|
613
|
+
var PostgreSQLProjectionSpec = {
|
|
614
|
+
for: (options) => {
|
|
615
|
+
{
|
|
616
|
+
const { projection: projection2, ...dumoOptions } = options;
|
|
617
|
+
const { connectionString } = dumoOptions;
|
|
618
|
+
return (givenEvents) => {
|
|
619
|
+
return {
|
|
620
|
+
when: (events, options2) => {
|
|
621
|
+
const allEvents = [];
|
|
622
|
+
const run = async (pool) => {
|
|
623
|
+
let globalPosition = 0n;
|
|
624
|
+
const numberOfTimes = _nullishCoalesce(_optionalChain([options2, 'optionalAccess', _17 => _17.numberOfTimes]), () => ( 1));
|
|
625
|
+
for (const event2 of [
|
|
626
|
+
...givenEvents,
|
|
627
|
+
...Array.from({ length: numberOfTimes }).flatMap(() => events)
|
|
628
|
+
]) {
|
|
629
|
+
allEvents.push({
|
|
630
|
+
...event2,
|
|
631
|
+
metadata: {
|
|
632
|
+
...{
|
|
633
|
+
globalPosition: ++globalPosition,
|
|
634
|
+
streamPosition: globalPosition,
|
|
635
|
+
streamName: `test-${_uuid.v4.call(void 0, )}`,
|
|
636
|
+
eventId: _uuid.v4.call(void 0, )
|
|
637
|
+
},
|
|
638
|
+
..._nullishCoalesce(event2.metadata, () => ( {}))
|
|
639
|
+
}
|
|
640
|
+
});
|
|
641
|
+
}
|
|
642
|
+
await pool.withTransaction(
|
|
643
|
+
(transaction) => handleProjections({
|
|
644
|
+
events: allEvents,
|
|
645
|
+
projections: [projection2],
|
|
646
|
+
connection: {
|
|
647
|
+
connectionString,
|
|
648
|
+
transaction
|
|
649
|
+
}
|
|
650
|
+
})
|
|
651
|
+
);
|
|
652
|
+
};
|
|
653
|
+
return {
|
|
654
|
+
then: async (assert, message) => {
|
|
655
|
+
const pool = _dumbo.dumbo.call(void 0, dumoOptions);
|
|
656
|
+
try {
|
|
657
|
+
await run(pool);
|
|
658
|
+
const succeeded = await assert({ pool, connectionString });
|
|
659
|
+
if (succeeded !== void 0 && succeeded === false)
|
|
660
|
+
assertFails(
|
|
661
|
+
_nullishCoalesce(message, () => ( "Projection specification didn't match the criteria"))
|
|
662
|
+
);
|
|
663
|
+
} finally {
|
|
664
|
+
await pool.close();
|
|
665
|
+
}
|
|
666
|
+
},
|
|
667
|
+
thenThrows: async (...args) => {
|
|
668
|
+
const pool = _dumbo.dumbo.call(void 0, dumoOptions);
|
|
669
|
+
try {
|
|
670
|
+
await run(pool);
|
|
671
|
+
throw new AssertionError("Handler did not fail as expected");
|
|
672
|
+
} catch (error) {
|
|
673
|
+
if (error instanceof AssertionError) throw error;
|
|
674
|
+
if (args.length === 0) return;
|
|
675
|
+
if (!isErrorConstructor(args[0])) {
|
|
676
|
+
assertTrue(
|
|
677
|
+
args[0](error),
|
|
678
|
+
`Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _18 => _18.toString, 'call', _19 => _19()])}`
|
|
679
|
+
);
|
|
680
|
+
return;
|
|
681
|
+
}
|
|
682
|
+
assertTrue(
|
|
683
|
+
error instanceof args[0],
|
|
684
|
+
`Caught error is not an instance of the expected type: ${_optionalChain([error, 'optionalAccess', _20 => _20.toString, 'call', _21 => _21()])}`
|
|
685
|
+
);
|
|
686
|
+
if (args[1]) {
|
|
687
|
+
assertTrue(
|
|
688
|
+
args[1](error),
|
|
689
|
+
`Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _22 => _22.toString, 'call', _23 => _23()])}`
|
|
690
|
+
);
|
|
691
|
+
}
|
|
692
|
+
} finally {
|
|
693
|
+
await pool.close();
|
|
694
|
+
}
|
|
695
|
+
}
|
|
696
|
+
};
|
|
697
|
+
}
|
|
698
|
+
};
|
|
699
|
+
};
|
|
700
|
+
}
|
|
701
|
+
}
|
|
702
|
+
};
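Putting the pieces together, a hedged sketch of how the projection spec above could be driven; the connection string, stream name and events are hypothetical, and it reuses shoppingCartSummary from the earlier sketch:

    const given = PostgreSQLProjectionSpec.for({
      projection: shoppingCartSummary,
      connectionString: 'postgresql://postgres:postgres@localhost:5432/postgres',
    });

    await given(
      eventsInStream('shopping_cart-123', [
        { type: 'ProductItemAdded', data: { quantity: 1 } },
      ]),
    )
      .when(eventsInStream('shopping_cart-123', [{ type: 'ProductItemAdded', data: { quantity: 1 } }]))
      .then(
        expectPongoDocuments
          .fromCollection('shoppingCartSummaries')
          .withId('shopping_cart-123')
          .toBeEqual({ itemsCount: 2 }),
      );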
|
|
703
|
+
var eventInStream = (streamName, event2) => {
|
|
704
|
+
return {
|
|
705
|
+
...event2,
|
|
706
|
+
metadata: {
|
|
707
|
+
..._nullishCoalesce(event2.metadata, () => ( {})),
|
|
708
|
+
streamName: _nullishCoalesce(_optionalChain([event2, 'access', _24 => _24.metadata, 'optionalAccess', _25 => _25.streamName]), () => ( streamName))
|
|
709
|
+
}
|
|
710
|
+
};
|
|
711
|
+
};
|
|
712
|
+
var eventsInStream = (streamName, events) => {
|
|
713
|
+
return events.map((e) => eventInStream(streamName, e));
|
|
714
|
+
};
|
|
715
|
+
var newEventsInStream = eventsInStream;
|
|
716
|
+
var assertSQLQueryResultMatches = (sql3, rows) => async ({ pool: { execute } }) => {
|
|
717
|
+
const result = await execute.query(sql3);
|
|
718
|
+
assertThatArray(rows).containsExactlyInAnyOrder(...result.rows);
|
|
719
|
+
};
|
|
720
|
+
var expectSQL = {
|
|
721
|
+
query: (sql3) => ({
|
|
722
|
+
resultRows: {
|
|
723
|
+
toBeTheSame: (rows) => assertSQLQueryResultMatches(sql3, rows)
|
|
724
|
+
}
|
|
725
|
+
})
|
|
726
|
+
};
|
|
727
|
+
|
|
728
|
+
// src/eventStore/projections/index.ts
|
|
729
|
+
var handleProjections = async (options) => {
|
|
730
|
+
const {
|
|
731
|
+
projections: allProjections,
|
|
732
|
+
events,
|
|
733
|
+
connection: { transaction, connectionString }
|
|
734
|
+
} = options;
|
|
735
|
+
const eventTypes = events.map((e) => e.type);
|
|
736
|
+
const projections = allProjections.filter(
|
|
737
|
+
(p) => p.canHandle.some((type) => eventTypes.includes(type))
|
|
738
|
+
);
|
|
739
|
+
const client = await transaction.connection.open();
|
|
740
|
+
for (const projection2 of projections) {
|
|
741
|
+
await projection2.handle(events, {
|
|
742
|
+
connectionString,
|
|
743
|
+
client,
|
|
744
|
+
transaction,
|
|
745
|
+
execute: transaction.execute
|
|
746
|
+
});
|
|
747
|
+
}
|
|
748
|
+
};
|
|
749
|
+
var postgreSQLProjection = (definition) => projection(definition);
|
|
750
|
+
var postgreSQLRawBatchSQLProjection = (handle, ...canHandle) => postgreSQLProjection({
|
|
751
|
+
canHandle,
|
|
752
|
+
handle: async (events, context) => {
|
|
753
|
+
const sqls = await handle(events, context);
|
|
754
|
+
await context.execute.batchCommand(sqls);
|
|
755
|
+
}
|
|
756
|
+
});
|
|
757
|
+
var postgreSQLRawSQLProjection = (handle, ...canHandle) => postgreSQLRawBatchSQLProjection(
|
|
758
|
+
async (events, context) => {
|
|
759
|
+
const sqls = [];
|
|
760
|
+
for (const event2 of events) {
|
|
761
|
+
sqls.push(await handle(event2, context));
|
|
762
|
+
}
|
|
763
|
+
return sqls;
|
|
764
|
+
},
|
|
765
|
+
...canHandle
|
|
766
|
+
);
|
|
767
|
+
|
|
768
|
+
// src/eventStore/schema/index.ts
|
|
769
|
+
|
|
770
|
+
|
|
771
|
+
// src/eventStore/schema/appendToStream.ts
|
|
772
|
+
|
|
773
|
+
|
|
774
|
+
|
|
775
|
+
|
|
776
|
+
|
|
777
|
+
|
|
778
|
+
|
|
779
|
+
// src/eventStore/schema/typing.ts
|
|
780
|
+
var emmettPrefix = "emt";
|
|
781
|
+
var globalTag = "global";
|
|
782
|
+
var defaultTag = "emt:default";
|
|
783
|
+
var globalNames = {
|
|
784
|
+
module: `${emmettPrefix}:module:${globalTag}`,
|
|
785
|
+
tenant: `${emmettPrefix}:tenant:${globalTag}`
|
|
786
|
+
};
|
|
787
|
+
var columns = {
|
|
788
|
+
partition: {
|
|
789
|
+
name: "partition"
|
|
790
|
+
},
|
|
791
|
+
isArchived: { name: "is_archived" }
|
|
792
|
+
};
|
|
793
|
+
var streamsTable = {
|
|
794
|
+
name: `${emmettPrefix}_streams`,
|
|
795
|
+
columns: {
|
|
796
|
+
partition: columns.partition,
|
|
797
|
+
isArchived: columns.isArchived
|
|
798
|
+
}
|
|
799
|
+
};
|
|
800
|
+
var eventsTable = {
|
|
801
|
+
name: `${emmettPrefix}_events`,
|
|
802
|
+
columns: {
|
|
803
|
+
partition: columns.partition,
|
|
804
|
+
isArchived: columns.isArchived
|
|
805
|
+
}
|
|
806
|
+
};
|
|
807
|
+
|
|
808
|
+
// src/eventStore/schema/appendToStream.ts
|
|
809
|
+
var appendEventsSQL = _dumbo.rawSql.call(void 0,
|
|
810
|
+
`CREATE OR REPLACE FUNCTION emt_append_event(
|
|
7
811
|
v_event_ids text[],
|
|
8
812
|
v_events_data jsonb[],
|
|
9
813
|
v_events_metadata jsonb[],
|
|
@@ -29,7 +833,7 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
29
833
|
BEGIN
|
|
30
834
|
IF v_expected_stream_position IS NULL THEN
|
|
31
835
|
SELECT COALESCE(max(stream_position), 0) INTO v_expected_stream_position
|
|
32
|
-
FROM ${
|
|
836
|
+
FROM ${streamsTable.name}
|
|
33
837
|
WHERE stream_id = v_stream_id AND partition = v_partition;
|
|
34
838
|
END IF;
|
|
35
839
|
|
|
@@ -51,7 +855,7 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
51
855
|
) AS event
|
|
52
856
|
),
|
|
53
857
|
all_events_insert AS (
|
|
54
|
-
INSERT INTO ${
|
|
858
|
+
INSERT INTO ${eventsTable.name}
|
|
55
859
|
(stream_id, stream_position, partition, event_data, event_metadata, event_schema_version, event_type, event_id, transaction_id)
|
|
56
860
|
SELECT
|
|
57
861
|
v_stream_id, ev.stream_position, v_partition, ev.event_data, ev.event_metadata, ev.schema_version, ev.event_type, ev.event_id, v_transaction_id
|
|
@@ -65,12 +869,12 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
65
869
|
|
|
66
870
|
|
|
67
871
|
IF v_expected_stream_position = 0 THEN
|
|
68
|
-
INSERT INTO ${
|
|
872
|
+
INSERT INTO ${streamsTable.name}
|
|
69
873
|
(stream_id, stream_position, partition, stream_type, stream_metadata, is_archived)
|
|
70
874
|
VALUES
|
|
71
875
|
(v_stream_id, v_next_stream_position, v_partition, v_stream_type, '{}', FALSE);
|
|
72
876
|
ELSE
|
|
73
|
-
UPDATE ${
|
|
877
|
+
UPDATE ${streamsTable.name} as s
|
|
74
878
|
SET stream_position = v_next_stream_position
|
|
75
879
|
WHERE stream_id = v_stream_id AND stream_position = v_expected_stream_position AND partition = v_partition AND is_archived = FALSE;
|
|
76
880
|
|
|
@@ -85,7 +889,74 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
85
889
|
RETURN QUERY SELECT TRUE, v_next_stream_position, v_last_global_position, v_transaction_id;
|
|
86
890
|
END;
|
|
87
891
|
$$;
|
|
88
|
-
`
|
|
892
|
+
`
|
|
893
|
+
);
|
|
894
|
+
var appendToStream = (pool, streamName, streamType, events, options) => pool.withTransaction(async (transaction) => {
|
|
895
|
+
const { execute } = transaction;
|
|
896
|
+
if (events.length === 0)
|
|
897
|
+
return { success: false, result: { success: false } };
|
|
898
|
+
let appendResult;
|
|
899
|
+
try {
|
|
900
|
+
const expectedStreamVersion = toExpectedVersion(
|
|
901
|
+
_optionalChain([options, 'optionalAccess', _26 => _26.expectedStreamVersion])
|
|
902
|
+
);
|
|
903
|
+
const eventsToAppend = events.map((e, i) => ({
|
|
904
|
+
...e,
|
|
905
|
+
metadata: {
|
|
906
|
+
streamName,
|
|
907
|
+
eventId: _uuid.v4.call(void 0, ),
|
|
908
|
+
streamPosition: BigInt(i),
|
|
909
|
+
...e.metadata
|
|
910
|
+
}
|
|
911
|
+
}));
|
|
912
|
+
appendResult = await appendEventsRaw(
|
|
913
|
+
execute,
|
|
914
|
+
streamName,
|
|
915
|
+
streamType,
|
|
916
|
+
eventsToAppend,
|
|
917
|
+
{
|
|
918
|
+
expectedStreamVersion
|
|
919
|
+
}
|
|
920
|
+
);
|
|
921
|
+
if (_optionalChain([options, 'optionalAccess', _27 => _27.preCommitHook]))
|
|
922
|
+
await options.preCommitHook(eventsToAppend, { transaction });
|
|
923
|
+
} catch (error) {
|
|
924
|
+
if (!isOptimisticConcurrencyError(error)) throw error;
|
|
925
|
+
appendResult = {
|
|
926
|
+
success: false,
|
|
927
|
+
last_global_position: null,
|
|
928
|
+
next_stream_position: null,
|
|
929
|
+
transaction_id: null
|
|
930
|
+
};
|
|
931
|
+
}
|
|
932
|
+
const {
|
|
933
|
+
success,
|
|
934
|
+
next_stream_position,
|
|
935
|
+
last_global_position,
|
|
936
|
+
transaction_id
|
|
937
|
+
} = appendResult;
|
|
938
|
+
return {
|
|
939
|
+
success,
|
|
940
|
+
result: success && next_stream_position && last_global_position && transaction_id ? {
|
|
941
|
+
success: true,
|
|
942
|
+
nextStreamPosition: BigInt(next_stream_position),
|
|
943
|
+
lastGlobalPosition: BigInt(last_global_position),
|
|
944
|
+
transactionId: transaction_id
|
|
945
|
+
} : { success: false }
|
|
946
|
+
};
|
|
947
|
+
});
+var toExpectedVersion = (expected) => {
+  if (expected === void 0) return null;
+  if (expected === NO_CONCURRENCY_CHECK) return null;
+  if (expected == STREAM_DOES_NOT_EXIST) return null;
+  if (expected == STREAM_EXISTS) return null;
+  return expected;
+};
+var isOptimisticConcurrencyError = (error) => error instanceof Error && "code" in error && error.code === "23505";
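appendToStream above turns a Postgres unique violation (SQLSTATE 23505) into a failed append instead of rethrowing it, which is how an optimistic-concurrency conflict surfaces to callers. A hedged sketch of inspecting the result (pool, stream name, type and events are hypothetical):

    declare const events: Array<{ type: string; data: unknown }>;

    const appended = await appendToStream(pool, 'shopping_cart-123', 'shopping_cart', events, {
      expectedStreamVersion: 3n,
    });

    if (!appended.success) {
      // either the stream already moved past version 3n (unique violation) or no events were passed
      throw new ExpectedVersionConflictError(undefined, 3n);
    }

    const { nextStreamPosition, lastGlobalPosition, transactionId } = appended.result;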
|
|
956
|
+
var appendEventsRaw = (execute, streamId, streamType, events, options) => _dumbo.single.call(void 0,
|
|
957
|
+
execute.command(
|
|
958
|
+
_dumbo.sql.call(void 0,
|
|
959
|
+
`SELECT * FROM emt_append_event(
|
|
89
960
|
ARRAY[%s]::text[],
|
|
90
961
|
ARRAY[%s]::jsonb[],
|
|
91
962
|
ARRAY[%s]::jsonb[],
|
|
@@ -95,22 +966,42 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
95
966
|
%L::text,
|
|
96
967
|
%s::bigint,
|
|
97
968
|
%L::text
|
|
98
|
-
)`,
|
|
969
|
+
)`,
|
|
970
|
+
events.map((e) => _dumbo.sql.call(void 0, "%L", e.metadata.eventId)).join(","),
|
|
971
|
+
events.map((e) => _dumbo.sql.call(void 0, "%L", JSONParser.stringify(e.data))).join(","),
|
|
972
|
+
events.map((e) => _dumbo.sql.call(void 0, "%L", JSONParser.stringify(_nullishCoalesce(e.metadata, () => ( {}))))).join(","),
|
|
973
|
+
events.map(() => `'1'`).join(","),
|
|
974
|
+
events.map((e) => _dumbo.sql.call(void 0, "%L", e.type)).join(","),
|
|
975
|
+
streamId,
|
|
976
|
+
streamType,
|
|
977
|
+
_nullishCoalesce(_optionalChain([options, 'optionalAccess', _28 => _28.expectedStreamVersion]), () => ( "NULL")),
|
|
978
|
+
_nullishCoalesce(_optionalChain([options, 'optionalAccess', _29 => _29.partition]), () => ( defaultTag))
|
|
979
|
+
)
|
|
980
|
+
)
|
|
981
|
+
);
|
|
982
|
+
|
|
983
|
+
// src/eventStore/schema/tables.ts
|
|
984
|
+
|
|
985
|
+
var streamsTableSQL = _dumbo.rawSql.call(void 0,
|
|
986
|
+
`CREATE TABLE IF NOT EXISTS ${streamsTable.name}(
|
|
99
987
|
stream_id TEXT NOT NULL,
|
|
100
988
|
stream_position BIGINT NOT NULL,
|
|
101
|
-
partition TEXT NOT NULL DEFAULT '${
|
|
989
|
+
partition TEXT NOT NULL DEFAULT '${globalTag}__${globalTag}',
|
|
102
990
|
stream_type TEXT NOT NULL,
|
|
103
991
|
stream_metadata JSONB NOT NULL,
|
|
104
992
|
is_archived BOOLEAN NOT NULL DEFAULT FALSE,
|
|
105
993
|
PRIMARY KEY (stream_id, stream_position, partition, is_archived),
|
|
106
994
|
UNIQUE (stream_id, partition, is_archived)
|
|
107
|
-
) PARTITION BY LIST (partition);`
|
|
995
|
+
) PARTITION BY LIST (partition);`
|
|
996
|
+
);
|
|
997
|
+
var eventsTableSQL = _dumbo.rawSql.call(void 0,
|
|
998
|
+
`
|
|
108
999
|
CREATE SEQUENCE IF NOT EXISTS emt_global_event_position;
|
|
109
1000
|
|
|
110
|
-
CREATE TABLE IF NOT EXISTS ${
|
|
1001
|
+
CREATE TABLE IF NOT EXISTS ${eventsTable.name}(
|
|
111
1002
|
stream_id TEXT NOT NULL,
|
|
112
1003
|
stream_position BIGINT NOT NULL,
|
|
113
|
-
partition TEXT NOT NULL DEFAULT '${
|
|
1004
|
+
partition TEXT NOT NULL DEFAULT '${globalTag}',
|
|
114
1005
|
event_data JSONB NOT NULL,
|
|
115
1006
|
event_metadata JSONB NOT NULL,
|
|
116
1007
|
event_schema_version TEXT NOT NULL,
|
|
@@ -121,7 +1012,10 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
121
1012
|
transaction_id XID8 NOT NULL,
|
|
122
1013
|
created TIMESTAMPTZ NOT NULL DEFAULT now(),
|
|
123
1014
|
PRIMARY KEY (stream_id, stream_position, partition, is_archived)
|
|
124
|
-
) PARTITION BY LIST (partition);`
|
|
1015
|
+
) PARTITION BY LIST (partition);`
|
|
1016
|
+
);
|
|
1017
|
+
var subscriptionsTableSQL = _dumbo.rawSql.call(void 0,
|
|
1018
|
+
`
|
|
125
1019
|
CREATE TABLE IF NOT EXISTS emt_subscriptions(
|
|
126
1020
|
subscription_id TEXT NOT NULL PRIMARY KEY,
|
|
127
1021
|
version INT NOT NULL DEFAULT 1,
|
|
@@ -130,11 +1024,17 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
130
1024
|
last_processed_position BIGINT NOT NULL,
|
|
131
1025
|
last_processed_transaction_id BIGINT NOT NULL
|
|
132
1026
|
);
|
|
133
|
-
`
|
|
1027
|
+
`
|
|
1028
|
+
);
|
|
1029
|
+
var sanitizeNameSQL = _dumbo.rawSql.call(void 0,
|
|
1030
|
+
`CREATE OR REPLACE FUNCTION emt_sanitize_name(input_name TEXT) RETURNS TEXT AS $$
|
|
134
1031
|
BEGIN
|
|
135
1032
|
RETURN REGEXP_REPLACE(input_name, '[^a-zA-Z0-9_]', '_', 'g');
|
|
136
1033
|
END;
|
|
137
|
-
$$ LANGUAGE plpgsql;`
|
|
1034
|
+
$$ LANGUAGE plpgsql;`
|
|
1035
|
+
);
|
|
1036
|
+
var addTablePartitions = _dumbo.rawSql.call(void 0,
|
|
1037
|
+
`
|
|
138
1038
|
CREATE OR REPLACE FUNCTION emt_add_table_partition(tableName TEXT, partition_name TEXT) RETURNS void AS $$
|
|
139
1039
|
DECLARE
|
|
140
1040
|
v_main_partiton_name TEXT;
|
|
@@ -171,193 +1071,432 @@ Actual:${h.stringify(e)}`)}function se(t,e,n){if(t===e)throw new y(_nullishCoale
|
|
|
171
1071
|
v_archived_partiton_name, v_main_partiton_name
|
|
172
1072
|
);
|
|
173
1073
|
END;
|
|
174
|
-
$$ LANGUAGE plpgsql;`
|
|
1074
|
+
$$ LANGUAGE plpgsql;`
|
|
1075
|
+
);
|
|
1076
|
+
var addEventsPartitions = _dumbo.rawSql.call(void 0,
|
|
1077
|
+
`
|
|
175
1078
|
CREATE OR REPLACE FUNCTION emt_add_partition(partition_name TEXT) RETURNS void AS $$
|
|
176
1079
|
BEGIN
|
|
177
|
-
PERFORM emt_add_table_partition('${
|
|
178
|
-
PERFORM emt_add_table_partition('${
|
|
1080
|
+
PERFORM emt_add_table_partition('${eventsTable.name}', partition_name);
|
|
1081
|
+
PERFORM emt_add_table_partition('${streamsTable.name}', partition_name);
|
|
179
1082
|
END;
|
|
180
|
-
$$ LANGUAGE plpgsql;`
|
|
1083
|
+
$$ LANGUAGE plpgsql;`
|
|
1084
|
+
);
|
|
1085
|
+
var addModuleSQL = _dumbo.rawSql.call(void 0,
|
|
1086
|
+
`
|
|
181
1087
|
CREATE OR REPLACE FUNCTION add_module(new_module TEXT) RETURNS void AS $$
|
|
182
1088
|
BEGIN
|
|
183
|
-
-- For ${
|
|
1089
|
+
-- For ${eventsTable.name} table
|
|
184
1090
|
EXECUTE format('
|
|
185
1091
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
186
1092
|
FOR VALUES IN (emt_sanitize_name(%L || ''__'' || %L)) PARTITION BY LIST (is_archived);',
|
|
187
|
-
emt_sanitize_name('${
|
|
1093
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || '${globalTag}'), '${eventsTable.name}', new_module, '${globalTag}'
|
|
188
1094
|
);
|
|
189
1095
|
|
|
190
1096
|
EXECUTE format('
|
|
191
1097
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
192
1098
|
FOR VALUES IN (FALSE);',
|
|
193
|
-
emt_sanitize_name('${
|
|
1099
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || '${globalTag}' || '_active'), emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || '${globalTag}')
|
|
194
1100
|
);
|
|
195
1101
|
|
|
196
1102
|
EXECUTE format('
|
|
197
1103
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
198
1104
|
FOR VALUES IN (TRUE);',
|
|
199
|
-
emt_sanitize_name('${
|
|
1105
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || '${globalTag}' || '_archived'), emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || '${globalTag}')
|
|
200
1106
|
);
|
|
201
1107
|
|
|
202
|
-
-- For ${
|
|
1108
|
+
-- For ${streamsTable.name} table
|
|
203
1109
|
EXECUTE format('
|
|
204
1110
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
205
1111
|
FOR VALUES IN (emt_sanitize_name(%L || ''__'' || %L)) PARTITION BY LIST (is_archived);',
|
|
206
|
-
emt_sanitize_name('${
|
|
1112
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || '${globalTag}'), '${streamsTable.name}', new_module, '${globalTag}'
|
|
207
1113
|
);
|
|
208
1114
|
|
|
209
1115
|
EXECUTE format('
|
|
210
1116
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
211
1117
|
FOR VALUES IN (FALSE);',
|
|
212
|
-
emt_sanitize_name('${
|
|
1118
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || '${globalTag}' || '_active'), emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || '${globalTag}')
|
|
213
1119
|
);
|
|
214
1120
|
|
|
215
1121
|
EXECUTE format('
|
|
216
1122
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
217
1123
|
FOR VALUES IN (TRUE);',
|
|
218
|
-
emt_sanitize_name('${
|
|
1124
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || '${globalTag}' || '_archived'), emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || '${globalTag}')
|
|
219
1125
|
);
|
|
220
1126
|
END;
|
|
221
1127
|
$$ LANGUAGE plpgsql;
|
|
222
|
-
`
|
|
1128
|
+
`
|
|
1129
|
+
);
|
|
1130
|
+
var addTenantSQL = _dumbo.rawSql.call(void 0,
|
|
1131
|
+
`
|
|
223
1132
|
CREATE OR REPLACE FUNCTION add_tenant(new_module TEXT, new_tenant TEXT) RETURNS void AS $$
|
|
224
1133
|
BEGIN
|
|
225
|
-
-- For ${
|
|
1134
|
+
-- For ${eventsTable.name} table
|
|
226
1135
|
EXECUTE format('
|
|
227
1136
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
228
1137
|
FOR VALUES IN (emt_sanitize_name(''%s__%s'')) PARTITION BY LIST (is_archived);',
|
|
229
|
-
emt_sanitize_name('${
|
|
1138
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || new_tenant), '${eventsTable.name}', new_module, new_tenant
|
|
230
1139
|
);
|
|
231
1140
|
|
|
232
1141
|
EXECUTE format('
|
|
233
1142
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
234
1143
|
FOR VALUES IN (FALSE);',
|
|
235
|
-
emt_sanitize_name('${
|
|
1144
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || new_tenant || '_active'), emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || new_tenant)
|
|
236
1145
|
);
|
|
237
1146
|
|
|
238
1147
|
EXECUTE format('
|
|
239
1148
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
240
1149
|
FOR VALUES IN (TRUE);',
|
|
241
|
-
emt_sanitize_name('${
|
|
1150
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || new_tenant || '_archived'), emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || new_tenant)
|
|
242
1151
|
);
|
|
243
1152
|
|
|
244
|
-
-- For ${
|
|
1153
|
+
-- For ${streamsTable.name} table
|
|
245
1154
|
EXECUTE format('
|
|
246
1155
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
247
1156
|
FOR VALUES IN (emt_sanitize_name(''%s__%s'')) PARTITION BY LIST (is_archived);',
|
|
248
|
-
emt_sanitize_name('${
|
|
1157
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || new_tenant), '${streamsTable.name}', new_module, new_tenant
|
|
249
1158
|
);
|
|
250
1159
|
|
|
251
1160
|
EXECUTE format('
|
|
252
1161
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
253
1162
|
FOR VALUES IN (FALSE);',
|
|
254
|
-
emt_sanitize_name('${
|
|
1163
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || new_tenant || '_active'), emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || new_tenant)
|
|
255
1164
|
);
|
|
256
1165
|
|
|
257
1166
|
EXECUTE format('
|
|
258
1167
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
259
1168
|
FOR VALUES IN (TRUE);',
|
|
260
|
-
emt_sanitize_name('${
|
|
1169
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || new_tenant || '_archived'), emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || new_tenant)
|
|
261
1170
|
);
|
|
262
1171
|
END;
|
|
263
1172
|
$$ LANGUAGE plpgsql;
|
|
264
|
-
`
|
|
1173
|
+
`
|
|
1174
|
+
);
|
|
1175
|
+
var addModuleForAllTenantsSQL = _dumbo.rawSql.call(void 0,
|
|
1176
|
+
`
|
|
265
1177
|
CREATE OR REPLACE FUNCTION add_module_for_all_tenants(new_module TEXT) RETURNS void AS $$
|
|
266
1178
|
DECLARE
|
|
267
1179
|
tenant_record RECORD;
|
|
268
1180
|
BEGIN
|
|
269
1181
|
PERFORM add_module(new_module);
|
|
270
1182
|
|
|
271
|
-
FOR tenant_record IN SELECT DISTINCT tenant FROM ${
|
|
1183
|
+
FOR tenant_record IN SELECT DISTINCT tenant FROM ${eventsTable.name}
|
|
272
1184
|
LOOP
|
|
273
|
-
-- For ${
|
|
1185
|
+
-- For ${eventsTable.name} table
|
|
274
1186
|
EXECUTE format('
|
|
275
1187
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
276
1188
|
FOR VALUES IN (emt_sanitize_name(''%s__%s'')) PARTITION BY LIST (is_archived);',
|
|
277
|
-
emt_sanitize_name('${
|
|
1189
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || tenant_record.tenant), '${eventsTable.name}', new_module, tenant_record.tenant
|
|
278
1190
|
);
|
|
279
1191
|
|
|
280
1192
|
EXECUTE format('
|
|
281
1193
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
282
1194
|
FOR VALUES IN (FALSE);',
|
|
283
|
-
emt_sanitize_name('${
|
|
1195
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || tenant_record.tenant || '_active'), emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || tenant_record.tenant)
|
|
284
1196
|
);
|
|
285
1197
|
|
|
286
1198
|
EXECUTE format('
|
|
287
1199
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
288
1200
|
FOR VALUES IN (TRUE);',
|
|
289
|
-
emt_sanitize_name('${
|
|
1201
|
+
emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || tenant_record.tenant || '_archived'), emt_sanitize_name('${eventsTable.name}_' || new_module || '__' || tenant_record.tenant)
|
|
290
1202
|
);
|
|
291
1203
|
|
|
292
|
-
-- For ${
|
|
1204
|
+
-- For ${streamsTable.name} table
|
|
293
1205
|
EXECUTE format('
|
|
294
1206
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
295
1207
|
FOR VALUES IN (emt_sanitize_name(''%s__%s'')) PARTITION BY LIST (is_archived);',
|
|
296
|
-
emt_sanitize_name('${
|
|
1208
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || tenant_record.tenant), '${streamsTable.name}', new_module, tenant_record.tenant
|
|
297
1209
|
);
|
|
298
1210
|
|
|
299
1211
|
EXECUTE format('
|
|
300
1212
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
301
1213
|
FOR VALUES IN (FALSE);',
|
|
302
|
-
emt_sanitize_name('${
|
|
1214
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || tenant_record.tenant || '_active'), emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || tenant_record.tenant)
|
|
303
1215
|
);
|
|
304
1216
|
|
|
305
1217
|
EXECUTE format('
|
|
306
1218
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
307
1219
|
FOR VALUES IN (TRUE);',
|
|
308
|
-
emt_sanitize_name('${
|
|
1220
|
+
emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || tenant_record.tenant || '_archived'), emt_sanitize_name('${streamsTable.name}_' || new_module || '__' || tenant_record.tenant)
|
|
309
1221
|
);
|
|
310
1222
|
END LOOP;
|
|
311
1223
|
END;
|
|
312
1224
|
$$ LANGUAGE plpgsql;
|
|
313
|
-
`
|
|
1225
|
+
`
|
|
1226
|
+
);
|
|
1227
|
+
var addTenantForAllModulesSQL = _dumbo.rawSql.call(void 0,
|
|
1228
|
+
`
|
|
314
1229
|
CREATE OR REPLACE FUNCTION add_tenant_for_all_modules(new_tenant TEXT) RETURNS void AS $$
|
|
315
1230
|
DECLARE
|
|
316
1231
|
module_record RECORD;
|
|
317
1232
|
BEGIN
|
|
318
|
-
FOR module_record IN SELECT DISTINCT partitionname FROM pg_partman.part_config WHERE parent_table = '${
|
|
1233
|
+
FOR module_record IN SELECT DISTINCT partitionname FROM pg_partman.part_config WHERE parent_table = '${eventsTable.name}'
|
|
319
1234
|
LOOP
|
|
320
|
-
-- For ${
|
|
1235
|
+
-- For ${eventsTable.name} table
|
|
321
1236
|
EXECUTE format('
|
|
322
1237
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
323
1238
|
FOR VALUES IN (emt_sanitize_name(''%s__%s'')) PARTITION BY LIST (is_archived);',
|
|
324
|
-
emt_sanitize_name('${
|
|
1239
|
+
emt_sanitize_name('${eventsTable.name}_' || module_record.partitionname || '__' || new_tenant), '${eventsTable.name}', module_record.partitionname, new_tenant
|
|
325
1240
|
);
|
|
326
1241
|
|
|
327
1242
|
EXECUTE format('
|
|
328
1243
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
329
1244
|
FOR VALUES IN (FALSE);',
|
|
330
|
-
emt_sanitize_name('${
|
|
1245
|
+
emt_sanitize_name('${eventsTable.name}_' || module_record.partitionname || '__' || new_tenant || '_active'), emt_sanitize_name('${eventsTable.name}_' || module_record.partitionname || '__' || new_tenant)
|
|
331
1246
|
);
|
|
332
1247
|
|
|
333
1248
|
EXECUTE format('
|
|
334
1249
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
335
1250
|
FOR VALUES IN (TRUE);',
|
|
336
|
-
emt_sanitize_name('${
|
|
1251
|
+
emt_sanitize_name('${eventsTable.name}_' || module_record.partitionname || '__' || new_tenant || '_archived'), emt_sanitize_name('${eventsTable.name}_' || module_record.partitionname || '__' || new_tenant)
|
|
337
1252
|
);
|
|
338
1253
|
|
|
339
|
-
-- For ${
|
|
1254
|
+
-- For ${streamsTable.name} table
|
|
340
1255
|
EXECUTE format('
|
|
341
1256
|
CREATE TABLE IF NOT EXISTS %I PARTITION OF %I
|
|
342
1257
|
FOR VALUES IN (emt_sanitize_name(''%s__%s'')) PARTITION BY LIST (is_archived);',
|
|
343
|
-
emt_sanitize_name('${
|
|
1258
|
+
emt_sanitize_name('${streamsTable.name}_' || module_record.partitionname || '__' || new_tenant), '${streamsTable.name}', module_record.partitionname, new_tenant
|
|
344
1259
|
);
|
|
345
1260
|
|
|
346
1261
|
EXECUTE format('
|
|
347
1262
|
CREATE TABLE IF NOT EXISTS %I_active PARTITION OF %I
|
|
348
1263
|
FOR VALUES IN (FALSE);',
|
|
349
|
-
emt_sanitize_name('${
|
|
1264
|
+
emt_sanitize_name('${streamsTable.name}_' || module_record.partitionname || '__' || new_tenant || '_active'), emt_sanitize_name('${streamsTable.name}_' || module_record.partitionname || '__' || new_tenant)
|
|
350
1265
|
);
|
|
351
1266
|
|
|
352
1267
|
EXECUTE format('
|
|
353
1268
|
CREATE TABLE IF NOT EXISTS %I_archived PARTITION OF %I
|
|
354
1269
|
FOR VALUES IN (TRUE);',
|
|
355
|
-
emt_sanitize_name('${
|
|
1270
|
+
emt_sanitize_name('${streamsTable.name}_' || module_record.partitionname || '__' || new_tenant || '_archived'), emt_sanitize_name('${streamsTable.name}_' || module_record.partitionname || '__' || new_tenant)
|
|
356
1271
|
);
|
|
357
1272
|
END LOOP;
|
|
358
1273
|
END;
|
|
359
1274
|
$$ LANGUAGE plpgsql;
|
|
360
|
-
`
|
|
361
|
-
|
|
362
|
-
|
|
1275
|
+
`
|
|
1276
|
+
);
|
|
1277
|
+
var addDefaultPartition = _dumbo.rawSql.call(void 0,
|
|
1278
|
+
`SELECT emt_add_partition('${defaultTag}');`
|
|
1279
|
+
);
|
|
1280
|
+
|
+// src/eventStore/schema/readStream.ts
+
+var readStream = async (execute, streamId, options) => {
+const fromCondition = options && "from" in options ? `AND stream_position >= ${options.from}` : "";
+const to = Number(
+options && "to" in options ? options.to : options && "maxCount" in options && options.maxCount ? options.from + options.maxCount : NaN
+);
+const toCondition = !isNaN(to) ? `AND stream_position <= ${to}` : "";
+const events = await _dumbo.mapRows.call(void 0,
+execute.query(
+_dumbo.sql.call(void 0,
+`SELECT stream_id, stream_position, global_position, event_data, event_metadata, event_schema_version, event_type, event_id
+FROM ${eventsTable.name}
+WHERE stream_id = %L AND partition = %L AND is_archived = FALSE ${fromCondition} ${toCondition}`,
+streamId,
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _30 => _30.partition]), () => ( defaultTag))
+)
+),
+(row) => {
+const rawEvent = event(
+row.event_type,
+row.event_data,
+row.event_metadata
+);
+return {
+...rawEvent,
+metadata: {
+...rawEvent.metadata,
+eventId: row.event_id,
+streamName: streamId,
+streamPosition: BigInt(row.stream_position),
+globalPosition: BigInt(row.global_position)
+}
+};
+}
+);
+return events.length > 0 ? {
+currentStreamVersion: events[events.length - 1].metadata.streamPosition,
+events,
+streamExists: true
+} : {
+currentStreamVersion: PostgreSQLEventStoreDefaultStreamVersion,
+events: [],
+streamExists: false
+};
+};
+
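readStream builds the SELECT with optional from/to bounds (maxCount is translated into an upper bound), scopes the query to a partition (the default tag when none is given), and maps rows back to events whose stream and global positions are BigInt values. A usage sketch through the public store; the connection string and stream name are illustrative.

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

// Illustrative connection string and stream name.
const eventStore = getPostgreSQLEventStore(
  'postgresql://localhost:5432/postgres',
);

const { streamExists, currentStreamVersion, events } =
  await eventStore.readStream('shoppingCart-123', { from: 1n, maxCount: 100n });

if (streamExists) {
  // Positions come back as BigInt, matching the row mapping above.
  console.log(currentStreamVersion, events.length);
}
```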
+// src/eventStore/schema/index.ts
+var schemaSQL = [
+streamsTableSQL,
+eventsTableSQL,
+subscriptionsTableSQL,
+sanitizeNameSQL,
+addTablePartitions,
+addEventsPartitions,
+addModuleSQL,
+addTenantSQL,
+addModuleForAllTenantsSQL,
+addTenantForAllModulesSQL,
+appendEventsSQL,
+addDefaultPartition
+];
+var createEventStoreSchema = async (pool) => {
+await pool.withTransaction(({ execute }) => execute.batchCommand(schemaSQL));
+};
+
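schemaSQL collects every schema statement (tables, partition helpers, append function, default partition), and createEventStoreSchema runs them as a single transactional batch. A sketch of running that migration against a dumbo pool directly; the connection string is illustrative.

```ts
import { dumbo } from '@event-driven-io/dumbo';
import {
  createEventStoreSchema,
  schemaSQL,
} from '@event-driven-io/emmett-postgresql';

// Illustrative connection string.
const pool = dumbo({
  connectionString: 'postgresql://localhost:5432/postgres',
});

// Executes every statement in schemaSQL within one transaction, as above.
await createEventStoreSchema(pool);

// The individual statements are also exported for inspection.
console.log(`schema statements: ${schemaSQL.length}`);

await pool.close();
```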
+// src/eventStore/postgreSQLEventStore.ts
+var defaultPostgreSQLOptions = {
+projections: [],
+schema: { autoMigration: "CreateOrUpdate" }
+};
+var PostgreSQLEventStoreDefaultStreamVersion = 0n;
+var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOptions) => {
+const poolOptions = {
+connectionString,
+...options.connectionOptions ? options.connectionOptions : {}
+};
+const pool = "dumbo" in poolOptions ? poolOptions.dumbo : _dumbo.dumbo.call(void 0, poolOptions);
+let migrateSchema;
+const autoGenerateSchema = _optionalChain([options, 'access', _31 => _31.schema, 'optionalAccess', _32 => _32.autoMigration]) === void 0 || _optionalChain([options, 'access', _33 => _33.schema, 'optionalAccess', _34 => _34.autoMigration]) !== "None";
+const ensureSchemaExists = () => {
+if (!autoGenerateSchema) return Promise.resolve();
+if (!migrateSchema) {
+migrateSchema = createEventStoreSchema(pool);
+}
+return migrateSchema;
+};
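Schema migration is lazy: unless schema.autoMigration is set to "None", the first read or append triggers createEventStoreSchema once and caches the resulting promise. A sketch of opting out and migrating explicitly; the connection string is illustrative.

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

// Illustrative connection string. With autoMigration left at its default
// ('CreateOrUpdate'), the schema is migrated lazily before the first
// read/append; 'None' disables that behaviour.
const eventStore = getPostgreSQLEventStore(
  'postgresql://localhost:5432/postgres',
  { schema: { autoMigration: 'None' } },
);

// Explicit, one-off migration instead.
await eventStore.schema.migrate();
```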
+const inlineProjections = (_nullishCoalesce(options.projections, () => ( []))).filter(({ type }) => type === "inline").map(({ projection: projection2 }) => projection2);
+const preCommitHook = inlineProjections.length > 0 ? (events, { transaction }) => handleProjections({
+projections: inlineProjections,
+connection: {
+connectionString,
+transaction
+},
+events
+}) : void 0;
+return {
+schema: {
+sql: () => schemaSQL.join(""),
+print: () => console.log(schemaSQL.join("")),
+migrate: async () => {
+await (migrateSchema = createEventStoreSchema(pool));
+}
+},
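Projection registrations whose type is "inline" are collected into a pre-commit hook that appears to run within the append transaction via handleProjections, and the returned schema helpers expose the generated DDL. A sketch of inspecting that SQL without touching the database; the connection string is illustrative.

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

// Illustrative connection string.
const eventStore = getPostgreSQLEventStore(
  'postgresql://localhost:5432/postgres',
);

const ddl = eventStore.schema.sql(); // concatenation of the schemaSQL statements
eventStore.schema.print();           // logs the same SQL to the console
console.log(ddl.length);
```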
+async aggregateStream(streamName, options2) {
+const { evolve, initialState, read } = options2;
+const expectedStreamVersion = _optionalChain([read, 'optionalAccess', _35 => _35.expectedStreamVersion]);
+let state = initialState();
+const result = await this.readStream(streamName, options2.read);
+const currentStreamVersion = result.currentStreamVersion;
+assertExpectedVersionMatchesCurrent(
+currentStreamVersion,
+expectedStreamVersion,
+PostgreSQLEventStoreDefaultStreamVersion
+);
+for (const event2 of result.events) {
+if (!event2) continue;
+state = evolve(state, event2);
+}
+return {
+currentStreamVersion,
+state,
+streamExists: result.streamExists
+};
+},
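aggregateStream reads the stream, checks the optional expected version, and folds the stored events through evolve starting from initialState. A usage sketch; the connection string, stream name, state shape, and event shape are illustrative and not taken from the package.

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

// Illustrative connection string.
const eventStore = getPostgreSQLEventStore(
  'postgresql://localhost:5432/postgres',
);

const { state, currentStreamVersion, streamExists } =
  await eventStore.aggregateStream('shoppingCart-123', {
    initialState: () => ({ items: 0 }),
    // Folds each stored event into the state, as the compiled loop above does;
    // the event shape here is illustrative, hence the loose typing.
    evolve: (state, event: any) =>
      event.type === 'ProductItemAdded'
        ? { items: state.items + (event.data?.quantity ?? 0) }
        : state,
  });

console.log(streamExists, currentStreamVersion, state.items);
```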
+readStream: async (streamName, options2) => {
+await ensureSchemaExists();
+return readStream(pool.execute, streamName, options2);
+},
+appendToStream: async (streamName, events, options2) => {
+await ensureSchemaExists();
+const [firstPart, ...rest] = streamName.split("-");
+const streamType = firstPart && rest.length > 0 ? firstPart : "emt:unknown";
+const appendResult = await appendToStream(
+pool,
+streamName,
+streamType,
+events,
+{
+...options2,
+preCommitHook
+}
+);
+if (!appendResult.success)
+throw new ExpectedVersionConflictError(
+-1n,
+//TODO: Return actual version in case of error
+_nullishCoalesce(_optionalChain([options2, 'optionalAccess', _36 => _36.expectedStreamVersion]), () => ( NO_CONCURRENCY_CHECK))
+);
+return {
+nextExpectedStreamVersion: appendResult.nextStreamPosition,
+createdNewStream: appendResult.nextStreamPosition >= BigInt(events.length)
+};
+},
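appendToStream derives the stream type from the part of the name before the first '-' (falling back to 'emt:unknown'), runs the inline-projection pre-commit hook, and throws ExpectedVersionConflictError when an expectedStreamVersion option does not match. A usage sketch without a concurrency check; the connection string, stream name, and event shape are illustrative.

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

// Illustrative connection string; 'shoppingCart-123' gets stream type 'shoppingCart'.
const eventStore = getPostgreSQLEventStore(
  'postgresql://localhost:5432/postgres',
);

const appended = await eventStore.appendToStream('shoppingCart-123', [
  { type: 'ProductItemAdded', data: { productId: 'p-1', quantity: 2 } },
]);

console.log(appended.nextExpectedStreamVersion, appended.createdNewStream);
```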
+close: () => pool.close(),
+async withSession(callback) {
+return await pool.withConnection(async (connection) => {
+const storeOptions = {
+...options,
+connectionOptions: {
+connection
+}
+};
+const eventStore = getPostgreSQLEventStore(
+connectionString,
+storeOptions
+);
+return callback({
+eventStore,
+close: () => Promise.resolve()
+});
+});
+}
+};
+};
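withSession hands the callback a store bound to a single pooled connection; the session's close() resolves immediately, and the connection presumably returns to the pool when the callback completes. A usage sketch with illustrative names.

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

// Illustrative connection string and stream name.
const eventStore = getPostgreSQLEventStore(
  'postgresql://localhost:5432/postgres',
);

const events = await eventStore.withSession(async ({ eventStore: session }) => {
  const result = await session.readStream('shoppingCart-123');
  return result.events;
});

console.log(events.length);
await eventStore.close();
```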
+exports.PostgreSQLEventStoreDefaultStreamVersion = PostgreSQLEventStoreDefaultStreamVersion; exports.PostgreSQLProjectionSpec = PostgreSQLProjectionSpec; exports.addDefaultPartition = addDefaultPartition; exports.addEventsPartitions = addEventsPartitions; exports.addModuleForAllTenantsSQL = addModuleForAllTenantsSQL; exports.addModuleSQL = addModuleSQL; exports.addTablePartitions = addTablePartitions; exports.addTenantForAllModulesSQL = addTenantForAllModulesSQL; exports.addTenantSQL = addTenantSQL; exports.appendEventsSQL = appendEventsSQL; exports.appendToStream = appendToStream; exports.assertSQLQueryResultMatches = assertSQLQueryResultMatches; exports.createEventStoreSchema = createEventStoreSchema; exports.defaultPostgreSQLOptions = defaultPostgreSQLOptions; exports.defaultTag = defaultTag; exports.documentDoesNotExist = documentDoesNotExist; exports.documentExists = documentExists; exports.documentMatchingExists = documentMatchingExists; exports.documentsAreTheSame = documentsAreTheSame; exports.documentsMatchingHaveCount = documentsMatchingHaveCount; exports.emmettPrefix = emmettPrefix; exports.eventInStream = eventInStream; exports.eventsInStream = eventsInStream; exports.eventsTable = eventsTable; exports.eventsTableSQL = eventsTableSQL; exports.expectPongoDocuments = expectPongoDocuments; exports.expectSQL = expectSQL; exports.getPostgreSQLEventStore = getPostgreSQLEventStore; exports.globalNames = globalNames; exports.globalTag = globalTag; exports.handleProjections = handleProjections; exports.newEventsInStream = newEventsInStream; exports.pongoMultiStreamProjection = pongoMultiStreamProjection; exports.pongoProjection = pongoProjection; exports.pongoSingleStreamProjection = pongoSingleStreamProjection; exports.postgreSQLProjection = postgreSQLProjection; exports.postgreSQLRawBatchSQLProjection = postgreSQLRawBatchSQLProjection; exports.postgreSQLRawSQLProjection = postgreSQLRawSQLProjection; exports.readStream = readStream; exports.sanitizeNameSQL = sanitizeNameSQL; exports.schemaSQL = schemaSQL; exports.streamsTable = streamsTable; exports.streamsTableSQL = streamsTableSQL; exports.subscriptionsTableSQL = subscriptionsTableSQL;
 //# sourceMappingURL=index.cjs.map