@event-driven-io/emmett-postgresql 0.35.0 → 0.36.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +359 -188
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +144 -181
- package/dist/index.d.ts +144 -181
- package/dist/index.js +347 -176
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
package/dist/index.cjs
CHANGED
@@ -176,6 +176,108 @@ var JSONParser = {
 return _optionalChain([options, 'optionalAccess', _16 => _16.map]) ? options.map(parsed) : parsed;
 }
 };
+var MessageProcessorType = {
+PROJECTOR: "projector",
+REACTOR: "reactor"
+};
+var defaultProcessingMessageProcessingScope = (handler, partialContext) => handler(partialContext);
+var reactor = (options) => {
+const eachMessage = "eachMessage" in options && options.eachMessage ? options.eachMessage : () => Promise.resolve();
+let isActive = true;
+const { checkpoints, processorId, partition } = options;
+const processingScope = _nullishCoalesce(options.processingScope, () => ( defaultProcessingMessageProcessingScope));
+return {
+id: options.processorId,
+type: _nullishCoalesce(options.type, () => ( MessageProcessorType.REACTOR)),
+close: () => _optionalChain([options, 'access', _17 => _17.hooks, 'optionalAccess', _18 => _18.onClose]) ? _optionalChain([options, 'access', _19 => _19.hooks, 'optionalAccess', _20 => _20.onClose, 'call', _21 => _21()]) : Promise.resolve(),
+start: async (startOptions) => {
+isActive = true;
+return await processingScope(async (context) => {
+if (_optionalChain([options, 'access', _22 => _22.hooks, 'optionalAccess', _23 => _23.onStart])) {
+await _optionalChain([options, 'access', _24 => _24.hooks, 'optionalAccess', _25 => _25.onStart, 'call', _26 => _26(context)]);
+}
+if (options.startFrom !== "CURRENT" && options.startFrom)
+return options.startFrom;
+let lastCheckpoint = null;
+if (checkpoints) {
+const readResult = await _optionalChain([checkpoints, 'optionalAccess', _27 => _27.read, 'call', _28 => _28(
+{
+processorId,
+partition
+},
+startOptions
+)]);
+lastCheckpoint = readResult.lastCheckpoint;
+}
+if (lastCheckpoint === null) return "BEGINNING";
+return {
+lastCheckpoint
+};
+}, startOptions);
+},
+get isActive() {
+return isActive;
+},
+handle: async (messages, partialContext) => {
+if (!isActive) return Promise.resolve();
+return await processingScope(async (context) => {
+let result = void 0;
+let lastCheckpoint = null;
+for (const message2 of messages) {
+const messageProcessingResult = await eachMessage(message2, context);
+if (checkpoints) {
+const storeCheckpointResult = await checkpoints.store(
+{
+processorId: options.processorId,
+version: options.version,
+message: message2,
+lastCheckpoint,
+partition: options.partition
+},
+context
+);
+if (storeCheckpointResult && storeCheckpointResult.success) {
+lastCheckpoint = storeCheckpointResult.newCheckpoint;
+}
+}
+if (messageProcessingResult && messageProcessingResult.type === "STOP") {
+isActive = false;
+result = messageProcessingResult;
+break;
+}
+if (options.stopAfter && options.stopAfter(message2)) {
+isActive = false;
+result = { type: "STOP", reason: "Stop condition reached" };
+break;
+}
+if (messageProcessingResult && messageProcessingResult.type === "SKIP")
+continue;
+}
+return result;
+}, partialContext);
+}
+};
+};
+var projector = (options) => {
+const { projection: projection2, ...rest } = options;
+return reactor({
+...rest,
+type: MessageProcessorType.PROJECTOR,
+processorId: _nullishCoalesce(options.processorId, () => ( `projection:${projection2.name}`)),
+hooks: {
+onStart: options.truncateOnStart && options.projection.truncate || _optionalChain([options, 'access', _29 => _29.hooks, 'optionalAccess', _30 => _30.onStart]) ? async (context) => {
+if (options.truncateOnStart && options.projection.truncate)
+await options.projection.truncate(context);
+if (_optionalChain([options, 'access', _31 => _31.hooks, 'optionalAccess', _32 => _32.onStart])) await _optionalChain([options, 'access', _33 => _33.hooks, 'optionalAccess', _34 => _34.onStart, 'call', _35 => _35(context)]);
+} : void 0,
+onClose: _optionalChain([options, 'access', _36 => _36.hooks, 'optionalAccess', _37 => _37.onClose])
+},
+eachMessage: async (event2, context) => {
+if (!projection2.canHandle.includes(event2.type)) return;
+await projection2.handle([event2], context);
+}
+});
+};
 var projection = (definition) => definition;
 var filter = (filter2) => new (0, _webstreamspolyfill.TransformStream)({
 transform(chunk, controller) {
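The hunk above bundles in generic `reactor` and `projector` processor factories with pluggable `checkpoints` and `processingScope`. The control flow it compiles down to: `eachMessage` may return `{ type: "STOP" }` or `{ type: "SKIP" }` (the checkpoint for a skipped message is still stored), and `stopAfter(message)` forces a stop after the current message. A minimal handler sketch following that protocol; the `HandlerResult` name and handler signature are assumptions inferred from this compiled output, not the package's published typings:

```ts
// Sketch only: `HandlerResult` is a hypothetical name for the result shape
// visible in the compiled `reactor` above.
type HandlerResult = { type: 'STOP'; reason?: string } | { type: 'SKIP' } | undefined;

const eachMessage = async (message: { type: string }): Promise<HandlerResult> => {
  if (message.type === 'IgnoredEvent') return { type: 'SKIP' }; // skipped, but its checkpoint is still stored
  if (message.type === 'MaintenanceRequested') return { type: 'STOP', reason: 'maintenance' };
  // ...handle the message...
  return undefined; // continue with the next message in the batch
};
```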
@@ -485,7 +587,7 @@ var readLastMessageGlobalPosition = async (execute, options) => {
 WHERE partition = %L AND is_archived = FALSE AND transaction_id < pg_snapshot_xmin(pg_current_snapshot())
 ORDER BY transaction_id, global_position
 LIMIT 1`,
-_nullishCoalesce(_optionalChain([options, 'optionalAccess',
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _38 => _38.partition]), () => ( defaultTag))
 )
 )
 );
@@ -502,7 +604,7 @@ var readMessagesBatch = async (execute, options) => {
 const fromCondition = from !== -0n ? `AND global_position >= ${from}` : "";
 const toCondition = "to" in options ? `AND global_position <= ${options.to}` : "";
 const limitCondition = "batchSize" in options ? `LIMIT ${options.batchSize}` : "";
-const
+const messages = await _dumbo.mapRows.call(void 0,
 execute.query(
 _dumbo.sql.call(void 0,
 `SELECT stream_id, stream_position, global_position, message_data, message_metadata, message_schema_version, message_type, message_id
@@ -510,7 +612,7 @@ var readMessagesBatch = async (execute, options) => {
 WHERE partition = %L AND is_archived = FALSE AND transaction_id < pg_snapshot_xmin(pg_current_snapshot()) ${fromCondition} ${toCondition}
 ORDER BY transaction_id, global_position
 ${limitCondition}`,
-_nullishCoalesce(_optionalChain([options, 'optionalAccess',
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _39 => _39.partition]), () => ( defaultTag))
 )
 ),
 (row) => {
@@ -533,14 +635,14 @@ var readMessagesBatch = async (execute, options) => {
 };
 }
 );
-return
-currentGlobalPosition:
-messages
-
+return messages.length > 0 ? {
+currentGlobalPosition: messages[messages.length - 1].metadata.globalPosition,
+messages,
+areMessagesLeft: messages.length === batchSize
 } : {
 currentGlobalPosition: "from" in options ? options.from : "after" in options ? options.after : 0n,
 messages: [],
-
+areMessagesLeft: false
 };
 };
 
@@ -551,21 +653,22 @@ var postgreSQLEventStoreMessageBatchPuller = ({
 executor,
 batchSize,
 eachBatch,
-pullingFrequencyInMs
+pullingFrequencyInMs,
+stopWhen
 }) => {
 let isRunning = false;
 let start;
 const pullMessages = async (options) => {
-const after = options.startFrom === "BEGINNING" ? 0n : options.startFrom === "END" ? await _asyncNullishCoalesce((await readLastMessageGlobalPosition(executor)).currentGlobalPosition, async () => ( 0n)) : options.startFrom.
+const after = options.startFrom === "BEGINNING" ? 0n : options.startFrom === "END" ? await _asyncNullishCoalesce((await readLastMessageGlobalPosition(executor)).currentGlobalPosition, async () => ( 0n)) : options.startFrom.lastCheckpoint;
 const readMessagesOptions = {
 after,
 batchSize
 };
 let waitTime = 100;
 do {
-const { messages, currentGlobalPosition,
+const { messages, currentGlobalPosition, areMessagesLeft } = await readMessagesBatch(executor, readMessagesOptions);
 if (messages.length > 0) {
-const result = await eachBatch(
+const result = await eachBatch(messages);
 if (result && result.type === "STOP") {
 isRunning = false;
 break;
@@ -573,7 +676,11 @@ var postgreSQLEventStoreMessageBatchPuller = ({
 }
 readMessagesOptions.after = currentGlobalPosition;
 await new Promise((resolve) => setTimeout(resolve, waitTime));
-if (!
+if (_optionalChain([stopWhen, 'optionalAccess', _40 => _40.noMessagesLeft]) === true && !areMessagesLeft) {
+isRunning = false;
+break;
+}
+if (!areMessagesLeft) {
 waitTime = Math.min(waitTime * 2, 1e3);
 } else {
 waitTime = pullingFrequencyInMs;
@@ -609,6 +716,7 @@ var zipPostgreSQLEventStoreMessageBatchPullerStartFrom = (options) => {
 // src/eventStore/consumers/postgreSQLEventStoreConsumer.ts
 
 
+
 // src/eventStore/consumers/postgreSQLProcessor.ts
 
 
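Two behavioural changes to the batch puller are visible above: `readMessagesBatch` now reports `areMessagesLeft` (true when the batch filled up to `batchSize`), and a new `stopWhen` option lets the puller stop once the store is drained (`stopWhen?.noMessagesLeft === true`). While it keeps running, the idle wait doubles up to 1 s and resets to `pullingFrequencyInMs` as soon as a full batch arrives. A tiny sketch restating that wait-time rule from the compiled code (not an exported API):

```ts
// Restates the polling loop's backoff decision shown in the hunk above.
const nextWaitTime = (
  areMessagesLeft: boolean,
  waitTime: number,
  pullingFrequencyInMs: number,
): number => (areMessagesLeft ? pullingFrequencyInMs : Math.min(waitTime * 2, 1_000));
```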
@@ -719,7 +827,7 @@ var appendToStream = (pool, streamName, streamType, messages, options) => pool.w
 let appendResult;
 try {
 const expectedStreamVersion = toExpectedVersion(
-_optionalChain([options, 'optionalAccess',
+_optionalChain([options, 'optionalAccess', _41 => _41.expectedStreamVersion])
 );
 const messagesToAppend = messages.map((e, i) => ({
 ...e,
@@ -740,7 +848,7 @@ var appendToStream = (pool, streamName, streamType, messages, options) => pool.w
 expectedStreamVersion
 }
 );
-if (_optionalChain([options, 'optionalAccess',
+if (_optionalChain([options, 'optionalAccess', _42 => _42.beforeCommitHook]))
 await options.beforeCommitHook(messagesToAppend, { transaction });
 } catch (error) {
 if (!isOptimisticConcurrencyError(error)) throw error;
@@ -798,8 +906,8 @@ var appendEventsRaw = (execute, streamId, streamType, messages, options) => _dum
 messages.map((e) => _dumbo.sql.call(void 0, "%L", e.kind === "Event" ? "E" : "C")).join(","),
 streamId,
 streamType,
-_nullishCoalesce(_optionalChain([options, 'optionalAccess',
-_nullishCoalesce(_optionalChain([options, 'optionalAccess',
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _43 => _43.expectedStreamVersion]), () => ( "NULL")),
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _44 => _44.partition]), () => ( defaultTag))
 )
 )
 );
@@ -866,7 +974,7 @@ BEGIN
 END;
 $$ LANGUAGE plpgsql;
 `);
-async
+var storeProcessorCheckpoint = async (execute, options) => {
 try {
 const { result } = await _dumbo.single.call(void 0,
 execute.command(
@@ -885,7 +993,7 @@ async function storeProcessorCheckpoint(execute, options) {
 console.log(error);
 throw error;
 }
-}
+};
 
 // src/eventStore/schema/tables.ts
 
@@ -1243,7 +1351,7 @@ var readProcessorCheckpoint = async (execute, options) => {
 FROM ${subscriptionsTable.name}
 WHERE partition = %L AND subscription_id = %L
 LIMIT 1`,
-_nullishCoalesce(_optionalChain([options, 'optionalAccess',
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _45 => _45.partition]), () => ( defaultTag)),
 options.processorId
 )
 )
@@ -1268,7 +1376,7 @@ var readStream = async (execute, streamId, options) => {
 FROM ${messagesTable.name}
 WHERE stream_id = %L AND partition = %L AND is_archived = FALSE ${fromCondition} ${toCondition}`,
 streamId,
-_nullishCoalesce(_optionalChain([options, 'optionalAccess',
+_nullishCoalesce(_optionalChain([options, 'optionalAccess', _46 => _46.partition]), () => ( defaultTag))
 )
 ),
 (row) => {
@@ -1324,120 +1432,114 @@ var createEventStoreSchema = async (pool) => {
 };
 
 // src/eventStore/consumers/postgreSQLProcessor.ts
-var
-
-
-
-
-
-
-
-
-
+var postgreSQLCheckpointer = () => ({
+read: async (options, context) => {
+const result = await readProcessorCheckpoint(context.execute, options);
+return { lastCheckpoint: _optionalChain([result, 'optionalAccess', _47 => _47.lastProcessedPosition]) };
+},
+store: async (options, context) => {
+const result = await storeProcessorCheckpoint(context.execute, {
+lastProcessedPosition: options.lastCheckpoint,
+newPosition: options.message.metadata.globalPosition,
+processorId: options.processorId,
+partition: options.partition,
+version: options.version
+});
+return result.success ? { success: true, newCheckpoint: result.newPosition } : result;
 }
-};
-var
-const
-
-const
-
-
-const processorConnectionString = "connectionString" in poolOptions ? poolOptions.connectionString : null;
-const processorPool = "dumbo" in poolOptions ? poolOptions.dumbo : processorConnectionString ? _dumbo.dumbo.call(void 0, {
-connectionString: processorConnectionString,
-...poolOptions
-}) : null;
-const getPool = (context) => {
-const connectionString = _nullishCoalesce(processorConnectionString, () => ( context.connectionString));
+});
+var postgreSQLProcessingScope = (options) => {
+const processorConnectionString = options.connectionString;
+const processorPool = options.pool;
+const processingScope = async (handler, partialContext) => {
+const connection = _optionalChain([partialContext, 'optionalAccess', _48 => _48.connection]);
+const connectionString = _nullishCoalesce(processorConnectionString, () => ( _optionalChain([connection, 'optionalAccess', _49 => _49.connectionString])));
 if (!connectionString)
 throw new EmmettError(
 `PostgreSQL processor '${options.processorId}' is missing connection string. Ensure that you passed it through options`
 );
-const pool = _nullishCoalesce((!processorConnectionString || connectionString == processorConnectionString ? _optionalChain([
+const pool = _nullishCoalesce((!processorConnectionString || connectionString == processorConnectionString ? _optionalChain([connection, 'optionalAccess', _50 => _50.pool]) : processorPool), () => ( processorPool));
 if (!pool)
 throw new EmmettError(
 `PostgreSQL processor '${options.processorId}' is missing connection string. Ensure that you passed it through options`
 );
-return {
-
-
-
-
-
-
-
-
-if (options.startFrom !== "CURRENT") return options.startFrom;
-const { lastProcessedPosition } = await readProcessorCheckpoint(execute, {
-processorId: options.processorId,
-partition: options.partition
-});
-if (lastProcessedPosition === null) return "BEGINNING";
-return { globalPosition: lastProcessedPosition };
-},
-get isActive() {
-return isActive;
-},
-handle: async ({ messages }, context) => {
-if (!isActive) return;
-const { pool, connectionString } = getPool(context);
-return pool.withTransaction(async (transaction) => {
-let result = void 0;
-let lastProcessedPosition = null;
-for (const message of messages) {
-const typedMessage = message;
-const client = await transaction.connection.open();
-const messageProcessingResult = await eachMessage(typedMessage, {
-execute: transaction.execute,
-connection: {
-connectionString,
-pool,
-transaction,
-client
-}
-});
-await storeProcessorCheckpoint(transaction.execute, {
-processorId: options.processorId,
-version: options.version,
-lastProcessedPosition,
-newPosition: typedMessage.metadata.globalPosition,
-partition: options.partition
-});
-lastProcessedPosition = typedMessage.metadata.globalPosition;
-if (messageProcessingResult && messageProcessingResult.type === "STOP") {
-isActive = false;
-result = messageProcessingResult;
-break;
-}
-if (options.stopAfter && options.stopAfter(typedMessage)) {
-isActive = false;
-result = { type: "STOP", reason: "Stop condition reached" };
-break;
-}
-if (messageProcessingResult && messageProcessingResult.type === "SKIP")
-continue;
+return pool.withTransaction(async (transaction) => {
+const client = await transaction.connection.open();
+return handler({
+execute: transaction.execute,
+connection: {
+connectionString,
+pool,
+client,
+transaction
 }
-return result;
 });
-}
+});
 };
+return processingScope;
 };
-var
-const
-
-
-
-
-
-
-
+var getProcessorPool = (options) => {
+const poolOptions = {
+...options.connectionOptions ? options.connectionOptions : {}
+};
+const processorConnectionString = "connectionString" in poolOptions ? _nullishCoalesce(poolOptions.connectionString, () => ( null)) : null;
+const processorPool = "dumbo" in poolOptions ? poolOptions.dumbo : processorConnectionString ? _dumbo.dumbo.call(void 0, {
+connectionString: processorConnectionString,
+...poolOptions
+}) : null;
+return {
+pool: processorPool,
+connectionString: processorConnectionString,
+close: processorPool != null && !("dumbo" in poolOptions) ? processorPool.close : void 0
+};
+};
+var postgreSQLProjector = (options) => {
+const { pool, connectionString, close } = getProcessorPool(options);
+const hooks = {
+onStart: _optionalChain([options, 'access', _51 => _51.hooks, 'optionalAccess', _52 => _52.onStart]),
+onClose: _optionalChain([options, 'access', _53 => _53.hooks, 'optionalAccess', _54 => _54.onClose]) || close ? async () => {
+if (_optionalChain([options, 'access', _55 => _55.hooks, 'optionalAccess', _56 => _56.onClose])) await _optionalChain([options, 'access', _57 => _57.hooks, 'optionalAccess', _58 => _58.onClose, 'call', _59 => _59()]);
+if (close) await close();
+} : void 0
+};
+return projector({
+...options,
+hooks,
+processingScope: postgreSQLProcessingScope({
+pool,
+connectionString,
+processorId: _nullishCoalesce(options.processorId, () => ( `projection:${options.projection.name}`))
+}),
+checkpoints: postgreSQLCheckpointer()
+});
+};
+var postgreSQLReactor = (options) => {
+const { pool, connectionString, close } = getProcessorPool(options);
+const hooks = {
+onStart: _optionalChain([options, 'access', _60 => _60.hooks, 'optionalAccess', _61 => _61.onStart]),
+onClose: _optionalChain([options, 'access', _62 => _62.hooks, 'optionalAccess', _63 => _63.onClose]) || close ? async () => {
+if (_optionalChain([options, 'access', _64 => _64.hooks, 'optionalAccess', _65 => _65.onClose])) await _optionalChain([options, 'access', _66 => _66.hooks, 'optionalAccess', _67 => _67.onClose, 'call', _68 => _68()]);
+if (close) await close();
+} : void 0
+};
+return reactor({
+...options,
+hooks,
+processingScope: postgreSQLProcessingScope({
+pool,
+connectionString,
+processorId: options.processorId
+}),
+checkpoints: postgreSQLCheckpointer()
 });
 };
-var
+var postgreSQLMessageProcessor = (options) => {
 if ("projection" in options) {
-return
+return postgreSQLProjector(
+options
+);
 }
-return
+return postgreSQLReactor(options);
 };
 
 // src/eventStore/consumers/postgreSQLEventStoreConsumer.ts
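The block above replaces the previous inline PostgreSQL processor with composable pieces: `postgreSQLCheckpointer` (persists positions through `readProcessorCheckpoint`/`storeProcessorCheckpoint`), `postgreSQLProcessingScope` (runs every handler inside `pool.withTransaction` and hands it `execute` plus the `connection`, open `client`, and `transaction`), and `getProcessorPool` (builds or reuses a dumbo pool from `connectionOptions`). `postgreSQLProjector`, `postgreSQLReactor`, and `postgreSQLMessageProcessor` wire those into the generic `projector`/`reactor` factories. A hedged usage sketch; only the option names visible in this compiled output are used, and the published TypeScript signatures may differ:

```ts
// Sketch only, inferred from the compiled factories above; not the published typings.
import { postgreSQLReactor } from '@event-driven-io/emmett-postgresql';

const notifier = postgreSQLReactor({
  processorId: 'order-notifier',
  connectionOptions: { connectionString: process.env.DATABASE_URL! },
  eachMessage: async (message, { execute, connection }) => {
    // Runs inside the transaction opened by postgreSQLProcessingScope;
    // the checkpoint is stored via postgreSQLCheckpointer in the same scope.
  },
});
```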
@@ -1458,22 +1560,25 @@ var postgreSQLEventStoreConsumer = (options) => {
 const result = await Promise.allSettled(
 activeProcessors.map((s) => {
 return s.handle(messagesBatch, {
-
-
+connection: {
+connectionString: options.connectionString,
+pool
+}
 });
 })
 );
 return result.some(
-(r) => r.status === "fulfilled" && _optionalChain([r, 'access',
+(r) => r.status === "fulfilled" && _optionalChain([r, 'access', _69 => _69.value, 'optionalAccess', _70 => _70.type]) !== "STOP"
 ) ? void 0 : {
 type: "STOP"
 };
 };
 const messagePooler = currentMessagePuller = postgreSQLEventStoreMessageBatchPuller({
+stopWhen: options.stopWhen,
 executor: pool.execute,
 eachBatch,
-batchSize: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess',
-pullingFrequencyInMs: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess',
+batchSize: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _71 => _71.batchSize]), () => ( DefaultPostgreSQLEventStoreProcessorBatchSize)),
+pullingFrequencyInMs: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _72 => _72.pullingFrequencyInMs]), () => ( DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs))
 });
 const stop = async () => {
 if (!isRunning) return;
@@ -1483,15 +1588,20 @@ var postgreSQLEventStoreConsumer = (options) => {
 currentMessagePuller = void 0;
 }
 await start;
+await Promise.all(processors.map((p) => p.close()));
 };
 return {
-
+consumerId: _nullishCoalesce(options.consumerId, () => ( _uuid.v7.call(void 0, ))),
 get isRunning() {
 return isRunning;
 },
+processors,
 processor: (options2) => {
-const processor =
-processors.push(
+const processor = postgreSQLMessageProcessor(options2);
+processors.push(
+// TODO: change that
+processor
+);
 return processor;
 },
 start: () => {
@@ -1505,7 +1615,18 @@ var postgreSQLEventStoreConsumer = (options) => {
 );
 isRunning = true;
 const startFrom = zipPostgreSQLEventStoreMessageBatchPullerStartFrom(
-await Promise.all(
+await Promise.all(
+processors.map(async (o) => {
+const result = await o.start({
+execute: pool.execute,
+connection: {
+connectionString: options.connectionString,
+pool
+}
+});
+return result;
+})
+)
 );
 return messagePooler.start({ startFrom });
 })();
@@ -1519,14 +1640,110 @@ var postgreSQLEventStoreConsumer = (options) => {
 };
 };
 
-// src/eventStore/
+// src/eventStore/consumers/rebuildPostgreSQLProjections.ts
 
+var rebuildPostgreSQLProjections = (options) => {
+const consumer = postgreSQLEventStoreConsumer({
+...options,
+stopWhen: { noMessagesLeft: true }
+});
+const projections = "projections" in options ? options.projections.map(
+(p) => "projection" in p ? {
+...p,
+processorId: `projection:${_nullishCoalesce(p.projection.name, () => ( _uuid.v7.call(void 0, )))}-rebuild`,
+truncateOnStart: _nullishCoalesce(p.truncateOnStart, () => ( true))
+} : {
+projection: p,
+processorId: `projection:${_nullishCoalesce(p.name, () => ( _uuid.v7.call(void 0, )))}-rebuild`,
+truncateOnStart: true
+}
+) : [options];
+for (const projectionDefinition of projections) {
+consumer.processor({
+...projectionDefinition,
+processorId: _nullishCoalesce(projectionDefinition.processorId, () => ( `projection:${_nullishCoalesce(projectionDefinition.projection.name, () => ( _uuid.v7.call(void 0, )))}-rebuild`)),
+truncateOnStart: _nullishCoalesce(projectionDefinition.truncateOnStart, () => ( true))
+});
+}
+return consumer;
+};
+
+// src/eventStore/projections/pongo/pongoProjections.ts
+
+
+var _pongo = require('@event-driven-io/pongo');
+var pongoProjection = ({
+truncate,
+handle,
+canHandle
+}) => postgreSQLProjection({
+canHandle,
+handle: async (events, context) => {
+const {
+connection: { connectionString, client, pool }
+} = context;
+const pongo = _pongo.pongoClient.call(void 0, connectionString, {
+connectionOptions: { client, pool }
+});
+await handle(events, {
+...context,
+pongo
+});
+},
+truncate: truncate ? (context) => {
+const {
+connection: { connectionString, client, pool }
+} = context;
+const pongo = _pongo.pongoClient.call(void 0, connectionString, {
+connectionOptions: { client, pool }
+});
+return truncate({
+...context,
+pongo
+});
+} : void 0
+});
+var pongoMultiStreamProjection = (options) => {
+const { collectionName, getDocumentId, canHandle } = options;
+return pongoProjection({
+handle: async (events, { pongo }) => {
+const collection = pongo.db().collection(collectionName);
+for (const event of events) {
+await collection.handle(getDocumentId(event), async (document) => {
+return "initialState" in options ? await options.evolve(
+_nullishCoalesce(document, () => ( options.initialState())),
+event
+) : await options.evolve(
+document,
+event
+);
+});
+}
+},
+canHandle,
+truncate: async (context) => {
+const {
+connection: { connectionString, client, pool }
+} = context;
+const pongo = _pongo.pongoClient.call(void 0, connectionString, {
+connectionOptions: { client, pool }
+});
+await pongo.db().collection(collectionName).deleteMany();
+}
+});
+};
+var pongoSingleStreamProjection = (options) => {
+return pongoMultiStreamProjection({
+...options,
+getDocumentId: _nullishCoalesce(options.getDocumentId, () => ( ((event) => event.metadata.streamName)))
+});
+};
 
 // src/eventStore/projections/pongo/pongoProjectionSpec.ts
 
 
 
-
+
 var withCollection = (handle, options) => {
 const { pool, connectionString, inDatabase, inCollection } = options;
 return pool.withConnection(async (connection) => {
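`rebuildPostgreSQLProjections` above composes the pieces introduced earlier: it creates a consumer with `stopWhen: { noMessagesLeft: true }`, registers one processor per projection with `truncateOnStart` defaulting to `true` and a `projection:<name>-rebuild` processor id, and returns the consumer. A hedged usage sketch; `summaryProjection` is a hypothetical projection definition and the exact public option types are assumptions based on this compiled output:

```ts
// Sketch only: rebuild PostgreSQL projections from the beginning of the store.
import { rebuildPostgreSQLProjections } from '@event-driven-io/emmett-postgresql';
import { summaryProjection } from './projections'; // hypothetical projection definition

const rebuild = rebuildPostgreSQLProjections({
  connectionString: process.env.DATABASE_URL!,
  projections: [summaryProjection], // each is truncated first, then replayed
});

// With stopWhen: { noMessagesLeft: true } set internally, pulling stops once
// the consumer has caught up with the last message in the store.
await rebuild.start();
```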
@@ -1658,55 +1875,6 @@ var expectPongoDocuments = {
 }
 };
 
-// src/eventStore/projections/pongo/projections.ts
-
-
-
-var pongoProjection = ({
-handle,
-canHandle
-}) => postgreSQLProjection({
-canHandle,
-handle: async (events, context) => {
-const {
-connection: { connectionString, client }
-} = context;
-const pongo = _pongo.pongoClient.call(void 0, connectionString, {
-connectionOptions: { client }
-});
-await handle(events, {
-...context,
-pongo
-});
-}
-});
-var pongoMultiStreamProjection = (options) => {
-const { collectionName, getDocumentId, canHandle } = options;
-return pongoProjection({
-handle: async (events, { pongo }) => {
-const collection = pongo.db().collection(collectionName);
-for (const event of events) {
-await collection.handle(getDocumentId(event), async (document) => {
-return "initialState" in options ? await options.evolve(
-_nullishCoalesce(document, () => ( options.initialState())),
-event
-) : await options.evolve(
-document,
-event
-);
-});
-}
-},
-canHandle
-});
-};
-var pongoSingleStreamProjection = (options) => {
-return pongoMultiStreamProjection({
-...options,
-getDocumentId: _nullishCoalesce(options.getDocumentId, () => ( ((event) => event.metadata.streamName)))
-});
-};
-
 // src/eventStore/projections/postgresProjectionSpec.ts
 
 
@@ -1723,7 +1891,7 @@ var PostgreSQLProjectionSpec = {
 const allEvents = [];
 const run = async (pool) => {
 let globalPosition = 0n;
-const numberOfTimes = _nullishCoalesce(_optionalChain([options2, 'optionalAccess',
+const numberOfTimes = _nullishCoalesce(_optionalChain([options2, 'optionalAccess', _73 => _73.numberOfTimes]), () => ( 1));
 for (const event of [
 ...givenEvents,
 ...Array.from({ length: numberOfTimes }).flatMap(() => events)
@@ -1780,18 +1948,18 @@ var PostgreSQLProjectionSpec = {
 if (!isErrorConstructor(args[0])) {
 assertTrue(
 args[0](error),
-`Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess',
+`Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _74 => _74.toString, 'call', _75 => _75()])}`
 );
 return;
 }
 assertTrue(
 error instanceof args[0],
-`Caught error is not an instance of the expected type: ${_optionalChain([error, 'optionalAccess',
+`Caught error is not an instance of the expected type: ${_optionalChain([error, 'optionalAccess', _76 => _76.toString, 'call', _77 => _77()])}`
 );
 if (args[1]) {
 assertTrue(
 args[1](error),
-`Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess',
+`Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _78 => _78.toString, 'call', _79 => _79()])}`
 );
 }
 } finally {
@@ -1810,7 +1978,7 @@ var eventInStream = (streamName, event) => {
 ...event,
 metadata: {
 ..._nullishCoalesce(event.metadata, () => ( {})),
-streamName: _nullishCoalesce(_optionalChain([event, 'access',
+streamName: _nullishCoalesce(_optionalChain([event, 'access', _80 => _80.metadata, 'optionalAccess', _81 => _81.streamName]), () => ( streamName))
 }
 };
 };
@@ -1830,7 +1998,8 @@ var expectSQL = {
 })
 };
 
-// src/eventStore/projections/
+// src/eventStore/projections/postgreSQLProjection.ts
+
 var handleProjections = async (options) => {
 const {
 projections: allProjections,
@@ -1886,7 +2055,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
 };
 const pool = "dumbo" in poolOptions ? poolOptions.dumbo : _dumbo.dumbo.call(void 0, poolOptions);
 let migrateSchema;
-const autoGenerateSchema = _optionalChain([options, 'access',
+const autoGenerateSchema = _optionalChain([options, 'access', _82 => _82.schema, 'optionalAccess', _83 => _83.autoMigration]) === void 0 || _optionalChain([options, 'access', _84 => _84.schema, 'optionalAccess', _85 => _85.autoMigration]) !== "None";
 const ensureSchemaExists = () => {
 if (!autoGenerateSchema) return Promise.resolve();
 if (!migrateSchema) {
@@ -1916,7 +2085,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
 },
 async aggregateStream(streamName, options2) {
 const { evolve, initialState, read } = options2;
-const expectedStreamVersion = _optionalChain([read, 'optionalAccess',
+const expectedStreamVersion = _optionalChain([read, 'optionalAccess', _86 => _86.expectedStreamVersion]);
 let state = initialState();
 const result = await this.readStream(streamName, options2.read);
 const currentStreamVersion = result.currentStreamVersion;
@@ -1957,7 +2126,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
 throw new ExpectedVersionConflictError(
 -1n,
 //TODO: Return actual version in case of error
-_nullishCoalesce(_optionalChain([options2, 'optionalAccess',
+_nullishCoalesce(_optionalChain([options2, 'optionalAccess', _87 => _87.expectedStreamVersion]), () => ( NO_CONCURRENCY_CHECK))
 );
 return {
 nextExpectedStreamVersion: appendResult.nextStreamPosition,
@@ -2051,5 +2220,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
 
 
 
-
+
+
+exports.DefaultPostgreSQLEventStoreProcessorBatchSize = DefaultPostgreSQLEventStoreProcessorBatchSize; exports.DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs = DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs; exports.PostgreSQLEventStoreDefaultStreamVersion = PostgreSQLEventStoreDefaultStreamVersion; exports.PostgreSQLProjectionSpec = PostgreSQLProjectionSpec; exports.addDefaultPartitionSQL = addDefaultPartitionSQL; exports.addModuleForAllTenantsSQL = addModuleForAllTenantsSQL; exports.addModuleSQL = addModuleSQL; exports.addPartitionSQL = addPartitionSQL; exports.addTablePartitions = addTablePartitions; exports.addTenantForAllModulesSQL = addTenantForAllModulesSQL; exports.addTenantSQL = addTenantSQL; exports.appendToStream = appendToStream; exports.appendToStreamSQL = appendToStreamSQL; exports.assertSQLQueryResultMatches = assertSQLQueryResultMatches; exports.createEventStoreSchema = createEventStoreSchema; exports.defaultPostgreSQLOptions = defaultPostgreSQLOptions; exports.defaultTag = defaultTag; exports.documentDoesNotExist = documentDoesNotExist; exports.documentExists = documentExists; exports.documentMatchingExists = documentMatchingExists; exports.documentsAreTheSame = documentsAreTheSame; exports.documentsMatchingHaveCount = documentsMatchingHaveCount; exports.emmettPrefix = emmettPrefix; exports.eventInStream = eventInStream; exports.eventsInStream = eventsInStream; exports.expectPongoDocuments = expectPongoDocuments; exports.expectSQL = expectSQL; exports.getPostgreSQLEventStore = getPostgreSQLEventStore; exports.globalNames = globalNames; exports.globalTag = globalTag; exports.handleProjections = handleProjections; exports.messagesTable = messagesTable; exports.messagesTableSQL = messagesTableSQL; exports.migrationFromEventsToMessagesSQL = migrationFromEventsToMessagesSQL; exports.newEventsInStream = newEventsInStream; exports.pongoMultiStreamProjection = pongoMultiStreamProjection; exports.pongoProjection = pongoProjection; exports.pongoSingleStreamProjection = pongoSingleStreamProjection; exports.postgreSQLCheckpointer = postgreSQLCheckpointer; exports.postgreSQLEventStoreConsumer = postgreSQLEventStoreConsumer; exports.postgreSQLEventStoreMessageBatchPuller = postgreSQLEventStoreMessageBatchPuller; exports.postgreSQLMessageProcessor = postgreSQLMessageProcessor; exports.postgreSQLProjection = postgreSQLProjection; exports.postgreSQLProjector = postgreSQLProjector; exports.postgreSQLRawBatchSQLProjection = postgreSQLRawBatchSQLProjection; exports.postgreSQLRawSQLProjection = postgreSQLRawSQLProjection; exports.postgreSQLReactor = postgreSQLReactor; exports.readLastMessageGlobalPosition = readLastMessageGlobalPosition; exports.readMessagesBatch = readMessagesBatch; exports.readProcessorCheckpoint = readProcessorCheckpoint; exports.readStream = readStream; exports.rebuildPostgreSQLProjections = rebuildPostgreSQLProjections; exports.sanitizeNameSQL = sanitizeNameSQL; exports.schemaSQL = schemaSQL; exports.storeProcessorCheckpoint = storeProcessorCheckpoint; exports.storeSubscriptionCheckpointSQL = storeSubscriptionCheckpointSQL; exports.streamsTable = streamsTable; exports.streamsTableSQL = streamsTableSQL; exports.subscriptionsTable = subscriptionsTable; exports.subscriptionsTableSQL = subscriptionsTableSQL; exports.zipPostgreSQLEventStoreMessageBatchPullerStartFrom = zipPostgreSQLEventStoreMessageBatchPullerStartFrom;
 //# sourceMappingURL=index.cjs.map