@event-driven-io/emmett-postgresql 0.28.0 → 0.30.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +689 -685
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +110 -106
- package/dist/index.d.ts +110 -106
- package/dist/index.js +675 -671
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
package/dist/index.cjs (CHANGED)
```diff
@@ -436,397 +436,190 @@ var assertThatArray = (array) => {
 // src/eventStore/postgreSQLEventStore.ts
 require('pg');
 
-// src/eventStore/ […]
-
+// src/eventStore/consumers/messageBatchProcessing/index.ts
 
-// src/eventStore/projections/pongo/pongoProjectionSpec.ts
 
+// src/eventStore/schema/readLastMessageGlobalPosition.ts
 
 
-
-var […]
-[… 5 lines unrecoverable in this extract …]
-  try {
-    const collection = pongo.db(inDatabase).collection(inCollection);
-    return handle(collection);
-  } finally {
-    await pongo.close();
-  }
-});
-};
-var withoutIdAndVersion = (doc) => {
-  const { _id, _version, ...without } = doc;
-  return without;
-};
-var assertDocumentsEqual = (actual, expected) => {
-  if ("_id" in expected)
-    assertEqual(
-      expected._id,
-      actual._id,
-      // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
-      `Document ids are not matching! Expected: ${expected._id}, Actual: ${actual._id}`
-    );
-  return assertDeepEqual(
-    withoutIdAndVersion(actual),
-    withoutIdAndVersion(expected)
-  );
+// src/eventStore/schema/typing.ts
+var emmettPrefix = "emt";
+var globalTag = "global";
+var defaultTag = "emt:default";
+var globalNames = {
+  module: `${emmettPrefix}:module:${globalTag}`,
+  tenant: `${emmettPrefix}:tenant:${globalTag}`
 };
-var […]
-
-
-    "withId" in options ? { _id: options.withId } : options.matchingFilter
-  );
-  assertIsNotNull(result);
-  assertDocumentsEqual(result, document);
-},
-{ ...options, ...assertOptions }
-);
-var documentsAreTheSame = (documents, options) => (assertOptions) => withCollection(
-  async (collection) => {
-    const result = await collection.find(
-      "withId" in options ? { _id: options.withId } : options.matchingFilter
-    );
-    assertEqual(
-      documents.length,
-      result.length,
-      "Different Documents Count than expected"
-    );
-    for (let i = 0; i < documents.length; i++) {
-      assertThatArray(result).contains(documents[i]);
-    }
-  },
-  { ...options, ...assertOptions }
-);
-var documentsMatchingHaveCount = (expectedCount, options) => (assertOptions) => withCollection(
-  async (collection) => {
-    const result = await collection.find(
-      "withId" in options ? { _id: options.withId } : options.matchingFilter
-    );
-    assertEqual(
-      expectedCount,
-      result.length,
-      "Different Documents Count than expected"
-    );
-  },
-  { ...options, ...assertOptions }
-);
-var documentMatchingExists = (options) => (assertOptions) => withCollection(
-  async (collection) => {
-    const result = await collection.find(
-      "withId" in options ? { _id: options.withId } : options.matchingFilter
-    );
-    assertThatArray(result).isNotEmpty();
-  },
-  { ...options, ...assertOptions }
-);
-var documentDoesNotExist = (options) => (assertOptions) => withCollection(
-  async (collection) => {
-    const result = await collection.findOne(
-      "withId" in options ? { _id: options.withId } : options.matchingFilter
-    );
-    assertIsNotNull(result);
+var columns = {
+  partition: {
+    name: "partition"
 },
-  {
-
-var […]
-[… 4 lines unrecoverable in this extract …]
-      toBeEqual: (document) => documentExists(document, {
-        withId: id,
-        inCollection: collectionName
-      }),
-      toExist: () => documentMatchingExists({
-        withId: id,
-        inCollection: collectionName
-      }),
-      notToExist: () => documentDoesNotExist({
-        withId: id,
-        inCollection: collectionName
-      })
-    };
-  },
-  matching: (filter2) => {
-    return {
-      toBeTheSame: (documents) => documentsAreTheSame(documents, {
-        matchingFilter: filter2,
-        inCollection: collectionName
-      }),
-      toHaveCount: (expectedCount) => documentsMatchingHaveCount(expectedCount, {
-        matchingFilter: filter2,
-        inCollection: collectionName
-      }),
-      toExist: () => documentMatchingExists({
-        matchingFilter: filter2,
-        inCollection: collectionName
-      }),
-      notToExist: () => documentDoesNotExist({
-        matchingFilter: filter2,
-        inCollection: collectionName
-      })
-    };
-  }
-};
+  isArchived: { name: "is_archived" }
+};
+var streamsTable = {
+  name: `${emmettPrefix}_streams`,
+  columns: {
+    partition: columns.partition,
+    isArchived: columns.isArchived
 }
 };
-[… 5 lines unrecoverable in this extract …]
-var pongoProjection = ({
-  handle,
-  canHandle
-}) => postgreSQLProjection({
-  canHandle,
-  handle: async (events, context) => {
-    const { connectionString, client } = context;
-    const pongo = _pongo.pongoClient.call(void 0, connectionString, {
-      connectionOptions: { client }
-    });
-    await handle(events, {
-      ...context,
-      pongo
-    });
+var eventsTable = {
+  name: `${emmettPrefix}_events`,
+  columns: {
+    partition: columns.partition,
+    isArchived: columns.isArchived
 }
-});
-var pongoMultiStreamProjection = (options) => {
-  const { collectionName, getDocumentId, canHandle } = options;
-  return pongoProjection({
-    handle: async (events, { pongo }) => {
-      const collection = pongo.db().collection(collectionName);
-      for (const event of events) {
-        await collection.handle(getDocumentId(event), async (document) => {
-          return "initialState" in options ? await options.evolve(
-            _nullishCoalesce(document, () => ( options.initialState())),
-            event
-          ) : await options.evolve(
-            document,
-            event
-          );
-        });
-      }
-    },
-    canHandle
-  });
 };
-var […]
-
-  ...options,
-  getDocumentId: _nullishCoalesce(options.getDocumentId, () => ( ((event) => event.metadata.streamName)))
-});
+var subscriptionsTable = {
+  name: `${emmettPrefix}_subscriptions`
 };
 
```
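The new `schema/typing.ts` module centralizes the identifiers that the rest of the bundle interpolates into SQL. A standalone restatement of those constants (copied from the added lines above) makes the generated names visible:

```ts
// Mirrors the constants added in src/eventStore/schema/typing.ts.
const emmettPrefix = 'emt';
const globalTag = 'global';
const defaultTag = 'emt:default'; // default partition key used in `WHERE partition = %L`

const globalNames = {
  module: `${emmettPrefix}:module:${globalTag}`, // "emt:module:global"
  tenant: `${emmettPrefix}:tenant:${globalTag}`, // "emt:tenant:global"
};

// Table names the queries below are built against.
const streamsTable = `${emmettPrefix}_streams`; // "emt_streams"
const eventsTable = `${emmettPrefix}_events`; // "emt_events"
const subscriptionsTable = `${emmettPrefix}_subscriptions`; // "emt_subscriptions"

console.log(globalNames, streamsTable, eventsTable, subscriptionsTable, defaultTag);
```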
```diff
-// src/eventStore/ […]
-
-
+// src/eventStore/schema/readLastMessageGlobalPosition.ts
+var readLastMessageGlobalPosition = async (execute, options) => {
+  const result = await _dumbo.singleOrNull.call(void 0,
+    execute.query(
+      _dumbo.sql.call(void 0,
+        `SELECT global_position
+         FROM ${eventsTable.name}
+         WHERE partition = %L AND is_archived = FALSE AND transaction_id < pg_snapshot_xmin(pg_current_snapshot())
+         ORDER BY transaction_id, global_position
+         LIMIT 1`,
+        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _17 => _17.partition]), () => ( defaultTag))
+      )
+    )
+  );
+  return {
+    currentGlobalPosition: result !== null ? BigInt(result.global_position) : null
+  };
+};
 
+// src/eventStore/schema/readMessagesBatch.ts
 
-var […]
-[… 32 lines unrecoverable in this extract …]
-        projections: [projection2],
-        connection: {
-          connectionString,
-          transaction
-        }
-      })
-    );
-  };
-  return {
-    then: async (assert, message) => {
-      const pool = _dumbo.dumbo.call(void 0, dumoOptions);
-      try {
-        await run(pool);
-        const succeeded = await assert({ pool, connectionString });
-        if (succeeded !== void 0 && succeeded === false)
-          assertFails(
-            _nullishCoalesce(message, () => ( "Projection specification didn't match the criteria"))
-          );
-      } finally {
-        await pool.close();
-      }
-    },
-    thenThrows: async (...args) => {
-      const pool = _dumbo.dumbo.call(void 0, dumoOptions);
-      try {
-        await run(pool);
-        throw new AssertionError("Handler did not fail as expected");
-      } catch (error) {
-        if (error instanceof AssertionError) throw error;
-        if (args.length === 0) return;
-        if (!isErrorConstructor(args[0])) {
-          assertTrue(
-            args[0](error),
-            `Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _18 => _18.toString, 'call', _19 => _19()])}`
-          );
-          return;
-        }
-        assertTrue(
-          error instanceof args[0],
-          `Caught error is not an instance of the expected type: ${_optionalChain([error, 'optionalAccess', _20 => _20.toString, 'call', _21 => _21()])}`
-        );
-        if (args[1]) {
-          assertTrue(
-            args[1](error),
-            `Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _22 => _22.toString, 'call', _23 => _23()])}`
-          );
-        }
-      } finally {
-        await pool.close();
-      }
-    }
-  };
-}
-};
```
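`readLastMessageGlobalPosition` returns `{ currentGlobalPosition: bigint | null }`, with `null` meaning the partition holds no visible messages yet (the query only counts rows whose `transaction_id` is below `pg_snapshot_xmin(pg_current_snapshot())`, so rows from still-open transactions are not observed). A small sketch of the expected fallback, matching what the puller's "END" start position does later in this diff:

```ts
// Minimal sketch of consuming readLastMessageGlobalPosition's result shape.
type LastPosition = { currentGlobalPosition: bigint | null };

// The puller's "END" handling coalesces a null position to 0n.
const startAfter = ({ currentGlobalPosition }: LastPosition): bigint =>
  currentGlobalPosition ?? 0n;

console.log(startAfter({ currentGlobalPosition: null })); // 0n (empty partition)
console.log(startAfter({ currentGlobalPosition: 41n })); // 41n
```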
```diff
+var readMessagesBatch = async (execute, options) => {
+  const from = "from" in options ? options.from : "after" in options ? options.after + 1n : 0n;
+  const batchSize = options && "batchSize" in options ? options.batchSize : options.to - options.from;
+  const fromCondition = from !== -0n ? `AND global_position >= ${from}` : "";
+  const toCondition = "to" in options ? `AND global_position <= ${options.to}` : "";
+  const limitCondition = "batchSize" in options ? `LIMIT ${options.batchSize}` : "";
+  const events = await _dumbo.mapRows.call(void 0,
+    execute.query(
+      _dumbo.sql.call(void 0,
+        `SELECT stream_id, stream_position, global_position, event_data, event_metadata, event_schema_version, event_type, event_id
+         FROM ${eventsTable.name}
+         WHERE partition = %L AND is_archived = FALSE AND transaction_id < pg_snapshot_xmin(pg_current_snapshot()) ${fromCondition} ${toCondition}
+         ORDER BY transaction_id, global_position
+         ${limitCondition}`,
+        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _18 => _18.partition]), () => ( defaultTag))
+      )
+    ),
+    (row) => {
+      const rawEvent = {
+        type: row.event_type,
+        data: row.event_data,
+        metadata: row.event_metadata
+      };
+      const metadata = {
+        ..."metadata" in rawEvent ? _nullishCoalesce(rawEvent.metadata, () => ( {})) : {},
+        eventId: row.event_id,
+        streamName: row.stream_id,
+        streamPosition: BigInt(row.stream_position),
+        globalPosition: BigInt(row.global_position)
+      };
+      return {
+        ...rawEvent,
+        metadata
 };
 }
-
+  );
+  return events.length > 0 ? {
+    currentGlobalPosition: events[events.length - 1].metadata.globalPosition,
+    messages: events,
+    areEventsLeft: events.length === batchSize
+  } : {
+    currentGlobalPosition: "from" in options ? options.from : "after" in options ? options.after : 0n,
+    messages: [],
+    areEventsLeft: false
+  };
 };
-
+
+// src/eventStore/consumers/messageBatchProcessing/index.ts
+var DefaultPostgreSQLEventStoreProcessorBatchSize = 100;
+var DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs = 50;
+var postgreSQLEventStoreMessageBatchPuller = ({
+  executor,
+  batchSize,
+  eachBatch,
+  pullingFrequencyInMs
+}) => {
+  let isRunning = false;
+  let start;
+  const pullMessages = async (options) => {
+    const after = options.startFrom === "BEGINNING" ? 0n : options.startFrom === "END" ? await _asyncNullishCoalesce((await readLastMessageGlobalPosition(executor)).currentGlobalPosition, async () => ( 0n)) : options.startFrom.globalPosition;
+    const readMessagesOptions = {
+      after,
+      batchSize
+    };
+    let waitTime = 100;
+    do {
+      const { messages, currentGlobalPosition, areEventsLeft } = await readMessagesBatch(executor, readMessagesOptions);
+      if (messages.length > 0) {
+        const result = await eachBatch({ messages });
+        if (result && result.type === "STOP") {
+          isRunning = false;
+          break;
+        }
+      }
+      readMessagesOptions.after = currentGlobalPosition;
+      await new Promise((resolve) => setTimeout(resolve, waitTime));
+      if (!areEventsLeft) {
+        waitTime = Math.min(waitTime * 2, 1e3);
+      } else {
+        waitTime = pullingFrequencyInMs;
+      }
+    } while (isRunning);
+  };
 return {
-[… 4 lines unrecoverable in this extract …]
+    get isRunning() {
+      return isRunning;
+    },
+    start: (options) => {
+      if (isRunning) return start;
+      start = (async () => {
+        isRunning = true;
+        return pullMessages(options);
+      })();
+      return start;
+    },
+    stop: async () => {
+      if (!isRunning) return;
+      isRunning = false;
+      await start;
 }
 };
 };
-var […]
-[… 4 lines unrecoverable in this extract …]
```
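`readMessagesBatch` accepts either an inclusive `from`, an exclusive `after`, or a `from`/`to` range, and translates them into SQL conditions. The resolution logic restated standalone (type names are mine; the real function also handles the partition and row mapping):

```ts
// Restates readMessagesBatch's option resolution; `after` is exclusive,
// so it is shifted by one position before becoming the >= condition.
type ReadMessagesBatchOptions =
  | { from: bigint; to: bigint }
  | { from: bigint; batchSize: bigint }
  | { after: bigint; batchSize: bigint };

const resolveBatchWindow = (options: ReadMessagesBatchOptions) => {
  const from = 'from' in options ? options.from : options.after + 1n;
  const fromCondition = from !== 0n ? `AND global_position >= ${from}` : '';
  const toCondition = 'to' in options ? `AND global_position <= ${options.to}` : '';
  const limitCondition = 'batchSize' in options ? `LIMIT ${options.batchSize}` : '';
  return { fromCondition, toCondition, limitCondition };
};

console.log(resolveBatchWindow({ after: 41n, batchSize: 100n }));
// => { fromCondition: 'AND global_position >= 42', toCondition: '', limitCondition: 'LIMIT 100' }
```

The batch puller polls with an adaptive delay: an incomplete batch (`areEventsLeft === false`) doubles the wait up to one second, while a full batch resets it to `pullingFrequencyInMs`. The loop skeleton with the database read stubbed out:

```ts
// Self-contained sketch of the puller's backoff; readBatch stands in for
// readMessagesBatch and isRunning for the closure flag in the bundle.
const pullingFrequencyInMs = 50;

const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function pollLoop(
  readBatch: () => Promise<{ areEventsLeft: boolean }>,
  isRunning: () => boolean,
): Promise<void> {
  let waitTime = 100;
  do {
    const { areEventsLeft } = await readBatch();
    await sleep(waitTime);
    waitTime = areEventsLeft
      ? pullingFrequencyInMs // a full batch came back: keep pulling quickly
      : Math.min(waitTime * 2, 1000); // idle: back off exponentially, capped at 1s
  } while (isRunning());
}
```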
```diff
-  const result = await execute.query(sql7);
-  assertThatArray(rows).containsExactlyInAnyOrder(result.rows);
-};
-var expectSQL = {
-  query: (sql7) => ({
-    resultRows: {
-      toBeTheSame: (rows) => assertSQLQueryResultMatches(sql7, rows)
-    }
-  })
+var zipPostgreSQLEventStoreMessageBatchPullerStartFrom = (options) => {
+  if (options.length === 0 || options.some((o) => o === void 0 || o === "BEGINNING"))
+    return "BEGINNING";
+  if (options.every((o) => o === "END")) return "END";
+  return options.filter((o) => o !== void 0 && o !== "BEGINNING" && o !== "END").sort((a, b) => a > b ? 1 : -1)[0];
 };
 
-// src/eventStore/ […]
```
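`zipPostgreSQLEventStoreMessageBatchPullerStartFrom` merges each processor's preferred start into one position for the shared puller: any missing checkpoint forces "BEGINNING", "END" wins only unanimously, and otherwise the earliest stored position is used so no processor misses messages. Restated with an explicit `globalPosition` comparison (the bundled code sorts the option values directly):

```ts
type StartFrom = 'BEGINNING' | 'END' | { globalPosition: bigint } | undefined;

const zipStartFrom = (options: StartFrom[]): StartFrom => {
  if (options.length === 0 || options.some((o) => o === undefined || o === 'BEGINNING'))
    return 'BEGINNING';
  if (options.every((o) => o === 'END')) return 'END';
  return options
    .filter((o): o is { globalPosition: bigint } => o !== undefined && o !== 'BEGINNING' && o !== 'END')
    .sort((a, b) => (a.globalPosition > b.globalPosition ? 1 : -1))[0];
};

console.log(zipStartFrom(['END', { globalPosition: 10n }, { globalPosition: 3n }]));
// => { globalPosition: 3n }: the consumer replays from the oldest checkpoint
```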
```diff
-var handleProjections = async (options) => {
-  const {
-    projections: allProjections,
-    events,
-    connection: { transaction, connectionString }
-  } = options;
-  const eventTypes = events.map((e) => e.type);
-  const projections = allProjections.filter(
-    (p) => p.canHandle.some((type) => eventTypes.includes(type))
-  );
-  const client = await transaction.connection.open();
-  for (const projection2 of projections) {
-    await projection2.handle(events, {
-      connectionString,
-      client,
-      transaction,
-      execute: transaction.execute
-    });
-  }
-};
-var postgreSQLProjection = (definition) => projection(definition);
-var postgreSQLRawBatchSQLProjection = (handle, ...canHandle) => postgreSQLProjection({
-  canHandle,
-  handle: async (events, context) => {
-    const sqls = await handle(events, context);
-    await context.execute.batchCommand(sqls);
-  }
-});
-var postgreSQLRawSQLProjection = (handle, ...canHandle) => postgreSQLRawBatchSQLProjection(
-  async (events, context) => {
-    const sqls = [];
-    for (const event of events) {
-      sqls.push(await handle(event, context));
-    }
-    return sqls;
-  },
-  ...canHandle
-);
+// src/eventStore/consumers/postgreSQLEventStoreConsumer.ts
 
-// src/eventStore/schema/index.ts
 
+// src/eventStore/consumers/postgreSQLProcessor.ts
 
-// src/eventStore/schema/appendToStream.ts
 
+// src/eventStore/schema/index.ts
 
 
+// src/eventStore/schema/appendToStream.ts
+
 
 
 
 
-// src/eventStore/schema/typing.ts
-var emmettPrefix = "emt";
-var globalTag = "global";
-var defaultTag = "emt:default";
-var globalNames = {
-  module: `${emmettPrefix}:module:${globalTag}`,
-  tenant: `${emmettPrefix}:tenant:${globalTag}`
-};
-var columns = {
-  partition: {
-    name: "partition"
-  },
-  isArchived: { name: "is_archived" }
-};
-var streamsTable = {
-  name: `${emmettPrefix}_streams`,
-  columns: {
-    partition: columns.partition,
-    isArchived: columns.isArchived
-  }
-};
-var eventsTable = {
-  name: `${emmettPrefix}_events`,
-  columns: {
-    partition: columns.partition,
-    isArchived: columns.isArchived
-  }
-};
-var subscriptionsTable = {
-  name: `${emmettPrefix}_subscriptions`
-};
 
-// src/eventStore/schema/appendToStream.ts
 var appendEventsSQL = _dumbo.rawSql.call(void 0,
 `CREATE OR REPLACE FUNCTION emt_append_event(
   v_event_ids text[],
```
```diff
@@ -919,7 +712,7 @@ var appendToStream = (pool, streamName, streamType, events, options) => pool.wit
 let appendResult;
 try {
   const expectedStreamVersion = toExpectedVersion(
-    _optionalChain([options, 'optionalAccess', […]
+    _optionalChain([options, 'optionalAccess', _19 => _19.expectedStreamVersion])
   );
   const eventsToAppend = events.map((e, i) => ({
     ...e,
```
```diff
@@ -939,7 +732,7 @@ var appendToStream = (pool, streamName, streamType, events, options) => pool.wit
       expectedStreamVersion
     }
   );
-  if (_optionalChain([options, 'optionalAccess', […]
+  if (_optionalChain([options, 'optionalAccess', _20 => _20.preCommitHook]))
     await options.preCommitHook(eventsToAppend, { transaction });
 } catch (error) {
   if (!isOptimisticConcurrencyError(error)) throw error;
```
```diff
@@ -995,13 +788,13 @@ var appendEventsRaw = (execute, streamId, streamType, events, options) => _dumbo
   events.map((e) => _dumbo.sql.call(void 0, "%L", e.type)).join(","),
   streamId,
   streamType,
-  _nullishCoalesce(_optionalChain([options, 'optionalAccess', […]
-  _nullishCoalesce(_optionalChain([options, 'optionalAccess', […]
+  _nullishCoalesce(_optionalChain([options, 'optionalAccess', _21 => _21.expectedStreamVersion]), () => ( "NULL")),
+  _nullishCoalesce(_optionalChain([options, 'optionalAccess', _22 => _22.partition]), () => ( defaultTag))
   )
 )
 );
 
-// src/eventStore/schema/ […]
+// src/eventStore/schema/storeProcessorCheckpoint.ts
 
 var storeSubscriptionCheckpointSQL = _dumbo.sql.call(void 0, `
 CREATE OR REPLACE FUNCTION store_subscription_checkpoint(
```
```diff
@@ -1063,13 +856,13 @@ BEGIN
 END;
 $$ LANGUAGE plpgsql;
 `);
-async function […]
+async function storeProcessorCheckpoint(execute, options) {
   try {
     const { result } = await _dumbo.single.call(void 0,
       execute.command(
         _dumbo.sql.call(void 0,
           `SELECT store_subscription_checkpoint(%L, %s, %L, %L, pg_current_xact_id(), %L) as result;`,
-          options. […]
+          options.processorId,
           _nullishCoalesce(options.version, () => ( 1)),
           options.newPosition,
           options.lastProcessedPosition,
```
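The rename here is mechanical (`subscriptionId` becomes `processorId`, the function becomes `storeProcessorCheckpoint`) while the underlying `store_subscription_checkpoint` SQL function keeps its name and argument order. A hand-assembled sketch of the call shape, illustrative only: the bundle fills the `%L`/`%s` placeholders through dumbo's `sql()` formatter, and the final argument (the partition) follows from the SQL template above, defaulting to `emt:default` as elsewhere in the bundle.

```ts
// Illustrative stand-in for dumbo's %L/%s formatting; do not use for real
// queries (no escaping). Argument order follows the SQL template above.
type CheckpointArgs = {
  processorId: string;
  version?: number;
  newPosition: bigint;
  lastProcessedPosition: bigint | null;
  partition?: string; // assumption: defaults to 'emt:default' as elsewhere in the bundle
};

const quote = (v: string | bigint) => `'${v}'`;

const storeCheckpointSQL = (o: CheckpointArgs): string =>
  `SELECT store_subscription_checkpoint(${quote(o.processorId)}, ${o.version ?? 1}, ` +
  `${quote(o.newPosition)}, ${o.lastProcessedPosition === null ? 'NULL' : quote(o.lastProcessedPosition)}, ` +
  `pg_current_xact_id(), ${quote(o.partition ?? 'emt:default')}) as result;`;

console.log(storeCheckpointSQL({ processorId: 'read-model-builder', newPosition: 42n, lastProcessedPosition: null }));
```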
```diff
@@ -1382,91 +1175,22 @@ var addDefaultPartition = _dumbo.rawSql.call(void 0,
 `SELECT emt_add_partition('${defaultTag}');`
 );
 
-// src/eventStore/schema/ […]
+// src/eventStore/schema/readStream.ts
 
-var […]
-const […]
+var readStream = async (execute, streamId, options) => {
+  const fromCondition = options && "from" in options ? `AND stream_position >= ${options.from}` : "";
+  const to = Number(
+    options && "to" in options ? options.to : options && "maxCount" in options && options.maxCount ? options.from + options.maxCount : NaN
+  );
+  const toCondition = !isNaN(to) ? `AND stream_position <= ${to}` : "";
+  const events = await _dumbo.mapRows.call(void 0,
     execute.query(
       _dumbo.sql.call(void 0,
-        `SELECT global_position
+        `SELECT stream_id, stream_position, global_position, event_data, event_metadata, event_schema_version, event_type, event_id
         FROM ${eventsTable.name}
-        WHERE partition = %L AND is_archived = FALSE
-
-
-        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _30 => _30.partition]), () => ( defaultTag))
-      )
-    )
-  );
-  return {
-    currentGlobalPosition: result !== null ? BigInt(result.global_position) : null
-  };
-};
-
-// src/eventStore/schema/readMessagesBatch.ts
-
-var readMessagesBatch = async (execute, options) => {
-  const from = "from" in options ? options.from : "after" in options ? options.after + 1n : 0n;
-  const batchSize = options && "batchSize" in options ? options.batchSize : options.to - options.from;
-  const fromCondition = from !== -0n ? `AND global_position >= ${from}` : "";
-  const toCondition = "to" in options ? `AND global_position <= ${options.to}` : "";
-  const limitCondition = "batchSize" in options ? `LIMIT ${options.batchSize}` : "";
-  const events = await _dumbo.mapRows.call(void 0,
-    execute.query(
-      _dumbo.sql.call(void 0,
-        `SELECT stream_id, stream_position, global_position, event_data, event_metadata, event_schema_version, event_type, event_id
-        FROM ${eventsTable.name}
-        WHERE partition = %L AND is_archived = FALSE AND transaction_id < pg_snapshot_xmin(pg_current_snapshot()) ${fromCondition} ${toCondition}
-        ORDER BY transaction_id, global_position
-        ${limitCondition}`,
-        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _31 => _31.partition]), () => ( defaultTag))
-      )
-    ),
-    (row) => {
-      const rawEvent = {
-        type: row.event_type,
-        data: row.event_data,
-        metadata: row.event_metadata
-      };
-      const metadata = {
-        ..."metadata" in rawEvent ? _nullishCoalesce(rawEvent.metadata, () => ( {})) : {},
-        eventId: row.event_id,
-        streamName: row.stream_id,
-        streamPosition: BigInt(row.stream_position),
-        globalPosition: BigInt(row.global_position)
-      };
-      return {
-        ...rawEvent,
-        metadata
-      };
-    }
-  );
-  return events.length > 0 ? {
-    currentGlobalPosition: events[events.length - 1].metadata.globalPosition,
-    messages: events,
-    areEventsLeft: events.length === batchSize
-  } : {
-    currentGlobalPosition: "from" in options ? options.from : "after" in options ? options.after : 0n,
-    messages: [],
-    areEventsLeft: false
-  };
-};
-
-// src/eventStore/schema/readStream.ts
-
-var readStream = async (execute, streamId, options) => {
-  const fromCondition = options && "from" in options ? `AND stream_position >= ${options.from}` : "";
-  const to = Number(
-    options && "to" in options ? options.to : options && "maxCount" in options && options.maxCount ? options.from + options.maxCount : NaN
-  );
-  const toCondition = !isNaN(to) ? `AND stream_position <= ${to}` : "";
-  const events = await _dumbo.mapRows.call(void 0,
-    execute.query(
-      _dumbo.sql.call(void 0,
-        `SELECT stream_id, stream_position, global_position, event_data, event_metadata, event_schema_version, event_type, event_id
-        FROM ${eventsTable.name}
-        WHERE stream_id = %L AND partition = %L AND is_archived = FALSE ${fromCondition} ${toCondition}`,
-        streamId,
-        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _32 => _32.partition]), () => ( defaultTag))
+        WHERE stream_id = %L AND partition = %L AND is_archived = FALSE ${fromCondition} ${toCondition}`,
+        streamId,
+        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _23 => _23.partition]), () => ( defaultTag))
       )
     ),
     (row) => {
```
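`readStream` folds its option shapes into two SQL conditions; a missing upper bound becomes `NaN`, which the `!isNaN(to)` guard then drops from the `WHERE` clause. Standalone restatement (type names are mine):

```ts
type ReadStreamOptions =
  | { from: bigint }
  | { to: bigint }
  | { from: bigint; to: bigint }
  | { from: bigint; maxCount?: bigint };

const streamWindow = (options?: ReadStreamOptions) => {
  const fromCondition =
    options && 'from' in options ? `AND stream_position >= ${options.from}` : '';
  const to = Number(
    options && 'to' in options
      ? options.to
      : options && 'maxCount' in options && options.maxCount
        ? options.from + options.maxCount
        : NaN,
  );
  const toCondition = !isNaN(to) ? `AND stream_position <= ${to}` : '';
  return { fromCondition, toCondition };
};

console.log(streamWindow({ from: 1n, maxCount: 10n }));
// => { fromCondition: 'AND stream_position >= 1', toCondition: 'AND stream_position <= 11' }
```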
```diff
@@ -1499,9 +1223,9 @@ var readStream = async (execute, streamId, options) => {
 };
 };
 
-// src/eventStore/schema/ […]
+// src/eventStore/schema/readProcessorCheckpoint.ts
 
-var […]
+var readProcessorCheckpoint = async (execute, options) => {
   const result = await _dumbo.singleOrNull.call(void 0,
     execute.query(
       _dumbo.sql.call(void 0,
```
```diff
@@ -1509,35 +1233,524 @@ var readSubscriptionCheckpoint = async (execute, options) => {
         FROM ${subscriptionsTable.name}
         WHERE partition = %L AND subscription_id = %L
         LIMIT 1`,
-        _nullishCoalesce(_optionalChain([options, 'optionalAccess', […]
-        options. […]
+        _nullishCoalesce(_optionalChain([options, 'optionalAccess', _24 => _24.partition]), () => ( defaultTag)),
+        options.processorId
       )
     )
   );
   return {
-    lastProcessedPosition: result !== null ? BigInt(result.last_processed_position) : null
+    lastProcessedPosition: result !== null ? BigInt(result.last_processed_position) : null
+  };
+};
+
+// src/eventStore/schema/index.ts
+var schemaSQL = [
+  streamsTableSQL,
+  eventsTableSQL,
+  subscriptionsTableSQL,
+  sanitizeNameSQL,
+  addTablePartitions,
+  addEventsPartitions,
+  addModuleSQL,
+  addTenantSQL,
+  addModuleForAllTenantsSQL,
+  addTenantForAllModulesSQL,
+  appendEventsSQL,
+  addDefaultPartition,
+  storeSubscriptionCheckpointSQL
+];
+var createEventStoreSchema = async (pool) => {
+  await pool.withTransaction(({ execute }) => execute.batchCommand(schemaSQL));
+};
+
```
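`schemaSQL` now bundles the checkpoint function and default partition alongside the tables, and `createEventStoreSchema` applies the whole array in one transaction. A usage sketch, under the assumption that `createEventStoreSchema` is re-exported from the package root (the bundle defines it internally; check the actual exports before relying on this import path):

```ts
import { dumbo } from '@event-driven-io/dumbo';
// Assumed export path, for illustration only:
import { createEventStoreSchema } from '@event-driven-io/emmett-postgresql';

const pool = dumbo({ connectionString: 'postgresql://localhost:5432/postgres' });

// Runs the whole schemaSQL batch (tables, partition helpers, emt_append_event,
// store_subscription_checkpoint) inside a single transaction.
await createEventStoreSchema(pool);
await pool.close();
```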
```diff
+// src/eventStore/consumers/postgreSQLProcessor.ts
+var PostgreSQLProcessor = {
+  result: {
+    skip: (options) => ({
+      type: "SKIP",
+      ..._nullishCoalesce(options, () => ( {}))
+    }),
+    stop: (options) => ({
+      type: "STOP",
+      ..._nullishCoalesce(options, () => ( {}))
+    })
+  }
+};
+var postgreSQLProcessor = (options) => {
+  const { eachMessage } = options;
+  let isActive = true;
+  return {
+    id: options.processorId,
+    start: async (execute) => {
+      isActive = true;
+      if (options.startFrom !== "CURRENT") return options.startFrom;
+      const { lastProcessedPosition } = await readProcessorCheckpoint(execute, {
+        processorId: options.processorId,
+        partition: options.partition
+      });
+      if (lastProcessedPosition === null) return "BEGINNING";
+      return { globalPosition: lastProcessedPosition };
+    },
+    get isActive() {
+      return isActive;
+    },
+    handle: async ({ messages }, { pool }) => {
+      if (!isActive) return;
+      return pool.withTransaction(async (tx) => {
+        let result = void 0;
+        let lastProcessedPosition = null;
+        for (const message of messages) {
+          const typedMessage = message;
+          const messageProcessingResult = await eachMessage(typedMessage);
+          await storeProcessorCheckpoint(tx.execute, {
+            processorId: options.processorId,
+            version: options.version,
+            lastProcessedPosition,
+            newPosition: typedMessage.metadata.globalPosition,
+            partition: options.partition
+          });
+          lastProcessedPosition = typedMessage.metadata.globalPosition;
+          if (messageProcessingResult && messageProcessingResult.type === "STOP") {
+            isActive = false;
+            result = messageProcessingResult;
+            break;
+          }
+          if (options.stopAfter && options.stopAfter(typedMessage)) {
+            isActive = false;
+            result = { type: "STOP", reason: "Stop condition reached" };
+            break;
+          }
+          if (messageProcessingResult && messageProcessingResult.type === "SKIP")
+            continue;
+        }
+        return result;
+      });
+    }
+  };
+};
+
```
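A processor pairs an `eachMessage` handler with a durable checkpoint: each batch is handled inside one transaction, the checkpoint row is updated per message, and handling can stop itself either by returning a `STOP` result or via a `stopAfter` predicate. A sketch of the option shape visible above (ids and event types are made up):

```ts
// Option shape mirroring postgreSQLProcessor's usage above; the types are
// simplified stand-ins for the package's generics.
type Message = { type: string; data: unknown; metadata: { globalPosition: bigint } };

const processorOptions = {
  processorId: 'shopping-cart-summary', // hypothetical id
  startFrom: 'CURRENT' as const, // resume from the stored checkpoint when present
  // Optional circuit breaker: stop once a given position has been processed.
  stopAfter: (message: Message) => message.metadata.globalPosition >= 1_000n,
  eachMessage: async (message: Message) => {
    if (message.type !== 'ShoppingCartConfirmed') {
      return { type: 'SKIP' as const }; // same shape as PostgreSQLProcessor.result.skip()
    }
    // ...apply the message to a read model...
  },
};
```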
```diff
+// src/eventStore/consumers/postgreSQLEventStoreConsumer.ts
+var postgreSQLEventStoreConsumer = (options) => {
+  let isRunning = false;
+  const { pulling } = options;
+  const processors = _nullishCoalesce(options.processors, () => ( []));
+  let start;
+  let currentMessagePuller;
+  const pool = "pool" in options ? options.pool : _dumbo.dumbo.call(void 0, { connectionString: options.connectionString });
+  const eachBatch = async (messagesBatch) => {
+    const activeProcessors = processors.filter((s) => s.isActive);
+    if (activeProcessors.length === 0)
+      return {
+        type: "STOP",
+        reason: "No active processors"
+      };
+    const result = await Promise.allSettled(
+      activeProcessors.map((s) => {
+        return s.handle(messagesBatch, { pool });
+      })
+    );
+    return result.some(
+      (r) => r.status === "fulfilled" && _optionalChain([r, 'access', _25 => _25.value, 'optionalAccess', _26 => _26.type]) !== "STOP"
+    ) ? void 0 : {
+      type: "STOP"
+    };
+  };
+  const messagePooler = currentMessagePuller = postgreSQLEventStoreMessageBatchPuller({
+    executor: pool.execute,
+    eachBatch,
+    batchSize: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _27 => _27.batchSize]), () => ( DefaultPostgreSQLEventStoreProcessorBatchSize)),
+    pullingFrequencyInMs: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _28 => _28.pullingFrequencyInMs]), () => ( DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs))
+  });
+  const stop = async () => {
+    if (!isRunning) return;
+    isRunning = false;
+    if (currentMessagePuller) {
+      await currentMessagePuller.stop();
+      currentMessagePuller = void 0;
+    }
+    await start;
+  };
+  return {
+    processors,
+    get isRunning() {
+      return isRunning;
+    },
+    processor: (options2) => {
+      const processor = postgreSQLProcessor(options2);
+      processors.push(processor);
+      return processor;
+    },
+    start: () => {
+      if (isRunning) return start;
+      start = (async () => {
+        if (processors.length === 0)
+          return Promise.reject(
+            new EmmettError(
+              "Cannot start consumer without at least a single processor"
+            )
+          );
+        isRunning = true;
+        const startFrom = zipPostgreSQLEventStoreMessageBatchPullerStartFrom(
+          await Promise.all(processors.map((o) => o.start(pool.execute)))
+        );
+        return messagePooler.start({ startFrom });
+      })();
+      return start;
+    },
+    stop,
+    close: async () => {
+      await stop();
+      await pool.close();
+    }
+  };
+};
+
```
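The consumer owns (or receives) a dumbo pool, fans each pulled batch out to every active processor via `Promise.allSettled`, and stops once no processor reports activity. A lifecycle sketch, assuming `postgreSQLEventStoreConsumer` is importable from the package root (in 0.30.0 it is also reachable through `eventStore.consumer()`, added later in this diff):

```ts
// Assumed import path, for illustration:
import { postgreSQLEventStoreConsumer } from '@event-driven-io/emmett-postgresql';

const consumer = postgreSQLEventStoreConsumer({
  connectionString: 'postgresql://localhost:5432/postgres',
  // Defaults shown earlier in this diff: batchSize 100, frequency 50ms.
  pulling: { batchSize: 100, pullingFrequencyInMs: 50 },
});

consumer.processor({
  processorId: 'read-model-builder', // hypothetical id
  startFrom: 'BEGINNING',
  eachMessage: async (message: { type: string }) => {
    console.log('handling', message.type);
  },
});

void consumer.start(); // rejects if no processor was registered
// ...later:
await consumer.close(); // stop() followed by pool.close()
```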
```diff
+// src/eventStore/projections/index.ts
+
+
+// src/eventStore/projections/pongo/pongoProjectionSpec.ts
+
+
+
+var _pongo = require('@event-driven-io/pongo');
+var withCollection = (handle, options) => {
+  const { pool, connectionString, inDatabase, inCollection } = options;
+  return pool.withConnection(async (connection) => {
+    const pongo = _pongo.pongoClient.call(void 0, connectionString, {
+      connectionOptions: { connection }
+    });
+    try {
+      const collection = pongo.db(inDatabase).collection(inCollection);
+      return handle(collection);
+    } finally {
+      await pongo.close();
+    }
+  });
+};
+var withoutIdAndVersion = (doc) => {
+  const { _id, _version, ...without } = doc;
+  return without;
+};
+var assertDocumentsEqual = (actual, expected) => {
+  if ("_id" in expected)
+    assertEqual(
+      expected._id,
+      actual._id,
+      // eslint-disable-next-line @typescript-eslint/restrict-template-expressions
+      `Document ids are not matching! Expected: ${expected._id}, Actual: ${actual._id}`
+    );
+  return assertDeepEqual(
+    withoutIdAndVersion(actual),
+    withoutIdAndVersion(expected)
+  );
+};
+var documentExists = (document, options) => (assertOptions) => withCollection(
+  async (collection) => {
+    const result = await collection.findOne(
+      "withId" in options ? { _id: options.withId } : options.matchingFilter
+    );
+    assertIsNotNull(result);
+    assertDocumentsEqual(result, document);
+  },
+  { ...options, ...assertOptions }
+);
+var documentsAreTheSame = (documents, options) => (assertOptions) => withCollection(
+  async (collection) => {
+    const result = await collection.find(
+      "withId" in options ? { _id: options.withId } : options.matchingFilter
+    );
+    assertEqual(
+      documents.length,
+      result.length,
+      "Different Documents Count than expected"
+    );
+    for (let i = 0; i < documents.length; i++) {
+      assertThatArray(result).contains(documents[i]);
+    }
+  },
+  { ...options, ...assertOptions }
+);
+var documentsMatchingHaveCount = (expectedCount, options) => (assertOptions) => withCollection(
+  async (collection) => {
+    const result = await collection.find(
+      "withId" in options ? { _id: options.withId } : options.matchingFilter
+    );
+    assertEqual(
+      expectedCount,
+      result.length,
+      "Different Documents Count than expected"
+    );
+  },
+  { ...options, ...assertOptions }
+);
+var documentMatchingExists = (options) => (assertOptions) => withCollection(
+  async (collection) => {
+    const result = await collection.find(
+      "withId" in options ? { _id: options.withId } : options.matchingFilter
+    );
+    assertThatArray(result).isNotEmpty();
+  },
+  { ...options, ...assertOptions }
+);
+var documentDoesNotExist = (options) => (assertOptions) => withCollection(
+  async (collection) => {
+    const result = await collection.findOne(
+      "withId" in options ? { _id: options.withId } : options.matchingFilter
+    );
+    assertIsNotNull(result);
+  },
+  { ...options, ...assertOptions }
+);
+var expectPongoDocuments = {
+  fromCollection: (collectionName) => {
+    return {
+      withId: (id) => {
+        return {
+          toBeEqual: (document) => documentExists(document, {
+            withId: id,
+            inCollection: collectionName
+          }),
+          toExist: () => documentMatchingExists({
+            withId: id,
+            inCollection: collectionName
+          }),
+          notToExist: () => documentDoesNotExist({
+            withId: id,
+            inCollection: collectionName
+          })
+        };
+      },
+      matching: (filter2) => {
+        return {
+          toBeTheSame: (documents) => documentsAreTheSame(documents, {
+            matchingFilter: filter2,
+            inCollection: collectionName
+          }),
+          toHaveCount: (expectedCount) => documentsMatchingHaveCount(expectedCount, {
+            matchingFilter: filter2,
+            inCollection: collectionName
+          }),
+          toExist: () => documentMatchingExists({
+            matchingFilter: filter2,
+            inCollection: collectionName
+          }),
+          notToExist: () => documentDoesNotExist({
+            matchingFilter: filter2,
+            inCollection: collectionName
+          })
+        };
+      }
+    };
+  }
+};
+
+// src/eventStore/projections/pongo/projections.ts
+
+
+
+var pongoProjection = ({
+  handle,
+  canHandle
+}) => postgreSQLProjection({
+  canHandle,
+  handle: async (events, context) => {
+    const { connectionString, client } = context;
+    const pongo = _pongo.pongoClient.call(void 0, connectionString, {
+      connectionOptions: { client }
+    });
+    await handle(events, {
+      ...context,
+      pongo
+    });
+  }
+});
+var pongoMultiStreamProjection = (options) => {
+  const { collectionName, getDocumentId, canHandle } = options;
+  return pongoProjection({
+    handle: async (events, { pongo }) => {
+      const collection = pongo.db().collection(collectionName);
+      for (const event of events) {
+        await collection.handle(getDocumentId(event), async (document) => {
+          return "initialState" in options ? await options.evolve(
+            _nullishCoalesce(document, () => ( options.initialState())),
+            event
+          ) : await options.evolve(
+            document,
+            event
+          );
+        });
+      }
+    },
+    canHandle
+  });
+};
+var pongoSingleStreamProjection = (options) => {
+  return pongoMultiStreamProjection({
+    ...options,
+    getDocumentId: _nullishCoalesce(options.getDocumentId, () => ( ((event) => event.metadata.streamName)))
+  });
+};
+
```
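`pongoSingleStreamProjection` defaults `getDocumentId` to the stream name, so one Pongo document tracks one stream. A definition sketch with made-up event and document shapes (the helper itself is part of the package's projections API):

```ts
import { pongoSingleStreamProjection } from '@event-driven-io/emmett-postgresql';

// Hypothetical event/document shapes for illustration.
type ShoppingCartOpened = {
  type: 'ShoppingCartOpened';
  data: { clientId: string };
};
type ShoppingCartSummary = { clientId: string | null; opened: boolean };

const shoppingCartSummary = pongoSingleStreamProjection({
  collectionName: 'shoppingCartSummaries',
  canHandle: ['ShoppingCartOpened'],
  initialState: (): ShoppingCartSummary => ({ clientId: null, opened: false }),
  evolve: (document: ShoppingCartSummary, event: ShoppingCartOpened) => ({
    ...document,
    clientId: event.data.clientId,
    opened: true,
  }),
});
```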
```diff
+// src/eventStore/projections/postgresProjectionSpec.ts
+
+
+
+
+var PostgreSQLProjectionSpec = {
+  for: (options) => {
+    {
+      const { projection: projection2, ...dumoOptions } = options;
+      const { connectionString } = dumoOptions;
+      return (givenEvents) => {
+        return {
+          when: (events, options2) => {
+            const allEvents = [];
+            const run = async (pool) => {
+              let globalPosition = 0n;
+              const numberOfTimes = _nullishCoalesce(_optionalChain([options2, 'optionalAccess', _29 => _29.numberOfTimes]), () => ( 1));
+              for (const event of [
+                ...givenEvents,
+                ...Array.from({ length: numberOfTimes }).flatMap(() => events)
+              ]) {
+                const metadata = {
+                  globalPosition: ++globalPosition,
+                  streamPosition: globalPosition,
+                  streamName: `test-${_uuid.v4.call(void 0, )}`,
+                  eventId: _uuid.v4.call(void 0, )
+                };
+                allEvents.push({
+                  ...event,
+                  metadata: {
+                    ...metadata,
+                    ..."metadata" in event ? _nullishCoalesce(event.metadata, () => ( {})) : {}
+                  }
+                });
+              }
+              await pool.withTransaction(
+                (transaction) => handleProjections({
+                  events: allEvents,
+                  projections: [projection2],
+                  connection: {
+                    connectionString,
+                    transaction
+                  }
+                })
+              );
+            };
+            return {
+              then: async (assert, message) => {
+                const pool = _dumbo.dumbo.call(void 0, dumoOptions);
+                try {
+                  await run(pool);
+                  const succeeded = await assert({ pool, connectionString });
+                  if (succeeded !== void 0 && succeeded === false)
+                    assertFails(
+                      _nullishCoalesce(message, () => ( "Projection specification didn't match the criteria"))
+                    );
+                } finally {
+                  await pool.close();
+                }
+              },
+              thenThrows: async (...args) => {
+                const pool = _dumbo.dumbo.call(void 0, dumoOptions);
+                try {
+                  await run(pool);
+                  throw new AssertionError("Handler did not fail as expected");
+                } catch (error) {
+                  if (error instanceof AssertionError) throw error;
+                  if (args.length === 0) return;
+                  if (!isErrorConstructor(args[0])) {
+                    assertTrue(
+                      args[0](error),
+                      `Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _30 => _30.toString, 'call', _31 => _31()])}`
+                    );
+                    return;
+                  }
+                  assertTrue(
+                    error instanceof args[0],
+                    `Caught error is not an instance of the expected type: ${_optionalChain([error, 'optionalAccess', _32 => _32.toString, 'call', _33 => _33()])}`
+                  );
+                  if (args[1]) {
+                    assertTrue(
+                      args[1](error),
+                      `Error didn't match the error condition: ${_optionalChain([error, 'optionalAccess', _34 => _34.toString, 'call', _35 => _35()])}`
+                    );
+                  }
+                } finally {
+                  await pool.close();
+                }
+              }
+            };
+          }
+        };
+      };
+    }
+  }
+};
+var eventInStream = (streamName, event) => {
+  return {
+    ...event,
+    metadata: {
+      ..._nullishCoalesce(event.metadata, () => ( {})),
+      streamName: _nullishCoalesce(_optionalChain([event, 'access', _36 => _36.metadata, 'optionalAccess', _37 => _37.streamName]), () => ( streamName))
+    }
 };
 };
+var eventsInStream = (streamName, events) => {
+  return events.map((e) => eventInStream(streamName, e));
+};
+var newEventsInStream = eventsInStream;
+var assertSQLQueryResultMatches = (sql7, rows) => async ({ pool: { execute } }) => {
+  const result = await execute.query(sql7);
+  assertThatArray(rows).containsExactlyInAnyOrder(result.rows);
+};
+var expectSQL = {
+  query: (sql7) => ({
+    resultRows: {
+      toBeTheSame: (rows) => assertSQLQueryResultMatches(sql7, rows)
+    }
+  })
+};
 
-// src/eventStore/ […]
-var […]
-[… 16 lines unrecoverable in this extract …]
+// src/eventStore/projections/index.ts
+var handleProjections = async (options) => {
+  const {
+    projections: allProjections,
+    events,
+    connection: { transaction, connectionString }
+  } = options;
+  const eventTypes = events.map((e) => e.type);
+  const projections = allProjections.filter(
+    (p) => p.canHandle.some((type) => eventTypes.includes(type))
+  );
+  const client = await transaction.connection.open();
+  for (const projection2 of projections) {
+    await projection2.handle(events, {
+      connectionString,
+      client,
+      transaction,
+      execute: transaction.execute
+    });
+  }
 };
+var postgreSQLProjection = (definition) => projection(definition);
+var postgreSQLRawBatchSQLProjection = (handle, ...canHandle) => postgreSQLProjection({
+  canHandle,
+  handle: async (events, context) => {
+    const sqls = await handle(events, context);
+    await context.execute.batchCommand(sqls);
+  }
+});
+var postgreSQLRawSQLProjection = (handle, ...canHandle) => postgreSQLRawBatchSQLProjection(
+  async (events, context) => {
+    const sqls = [];
+    for (const event of events) {
+      sqls.push(await handle(event, context));
+    }
+    return sqls;
+  },
+  ...canHandle
+);
 
 // src/eventStore/postgreSQLEventStore.ts
 var defaultPostgreSQLOptions = {
```
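`PostgreSQLProjectionSpec` gives projections a given/when/then test harness: `when` stamps synthetic `globalPosition`/`streamName` metadata on the events and runs the projection in a transaction, while `then` receives the pool for assertions (`expectSQL` is a ready-made one). A test sketch, assuming these helpers are exported as defined above and reusing the hypothetical projection from the previous sketch:

```ts
import {
  PostgreSQLProjectionSpec,
  eventInStream,
  expectSQL,
} from '@event-driven-io/emmett-postgresql'; // assumed export paths
import { shoppingCartSummary } from './shoppingCartSummary'; // hypothetical module

const given = PostgreSQLProjectionSpec.for({
  projection: shoppingCartSummary,
  connectionString: 'postgresql://localhost:5432/postgres',
});

await given([])
  .when([
    eventInStream('shopping_cart-1', {
      type: 'ShoppingCartOpened',
      data: { clientId: 'client-1' },
    }),
  ])
  .then(
    // Hypothetical assertion: compares the query's rows to the expectation.
    expectSQL.query('SELECT partition FROM emt_streams').resultRows.toBeTheSame([
      { partition: 'emt:default' },
    ]),
  );
```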
```diff
@@ -1552,7 +1765,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
 };
 const pool = "dumbo" in poolOptions ? poolOptions.dumbo : _dumbo.dumbo.call(void 0, poolOptions);
 let migrateSchema;
-const autoGenerateSchema = _optionalChain([options, 'access', […]
+const autoGenerateSchema = _optionalChain([options, 'access', _38 => _38.schema, 'optionalAccess', _39 => _39.autoMigration]) === void 0 || _optionalChain([options, 'access', _40 => _40.schema, 'optionalAccess', _41 => _41.autoMigration]) !== "None";
 const ensureSchemaExists = () => {
   if (!autoGenerateSchema) return Promise.resolve();
   if (!migrateSchema) {
```
```diff
@@ -1581,7 +1794,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
 },
 async aggregateStream(streamName, options2) {
   const { evolve, initialState, read } = options2;
-  const expectedStreamVersion = _optionalChain([read, 'optionalAccess', […]
+  const expectedStreamVersion = _optionalChain([read, 'optionalAccess', _42 => _42.expectedStreamVersion]);
   let state = initialState();
   const result = await this.readStream(streamName, options2.read);
   const currentStreamVersion = result.currentStreamVersion;
```
```diff
@@ -1622,7 +1835,7 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
   throw new ExpectedVersionConflictError(
     -1n,
     //TODO: Return actual version in case of error
-    _nullishCoalesce(_optionalChain([options2, 'optionalAccess', […]
+    _nullishCoalesce(_optionalChain([options2, 'optionalAccess', _43 => _43.expectedStreamVersion]), () => ( NO_CONCURRENCY_CHECK))
   );
   return {
     nextExpectedStreamVersion: appendResult.nextStreamPosition,
```
```diff
@@ -1630,6 +1843,10 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
       createdNewStream: appendResult.nextStreamPosition >= BigInt(events.length)
     };
   },
+  consumer: (options2) => postgreSQLEventStoreConsumer({
+    ..._nullishCoalesce(options2, () => ( {})),
+    pool
+  }),
   close: () => pool.close(),
   async withSession(callback) {
     return await pool.withConnection(async (connection) => {
```
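0.30.0 also exposes the consumer straight from the event store: `eventStore.consumer()` spreads the caller's options and injects the store's own pool. A short sketch (note that `close()` on such a consumer would close the shared pool, per the consumer code above, so `stop()` is used here):

```ts
import { getPostgreSQLEventStore } from '@event-driven-io/emmett-postgresql';

const eventStore = getPostgreSQLEventStore('postgresql://localhost:5432/postgres');

const consumer = eventStore.consumer();
consumer.processor({
  processorId: 'audit-log', // hypothetical id
  startFrom: 'CURRENT',
  eachMessage: async (message: { type: string }) => {
    console.log('seen', message.type);
  },
});

void consumer.start();
// ...later:
await consumer.stop(); // leaves the event store's pool open
```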
@@ -1652,219 +1869,6 @@ var getPostgreSQLEventStore = (connectionString, options = defaultPostgreSQLOpti
|
|
|
1652
1869
|
};
|
|
1653
1870
|
};
|
|
1654
1871
|
|
|
1655
|
-
// src/eventStore/subscriptions/messageBatchProcessing/index.ts
|
|
1656
|
-
|
|
1657
|
-
var DefaultPostgreSQLEventStoreSubscriptionBatchSize = 100;
|
|
1658
|
-
var DefaultPostgreSQLEventStoreSubscriptionPullingFrequencyInMs = 50;
|
|
1659
|
-
var postgreSQLEventStoreMessageBatchPuller = ({
|
|
1660
|
-
executor,
|
|
1661
|
-
batchSize,
|
|
1662
|
-
eachBatch,
|
|
1663
|
-
pullingFrequencyInMs
|
|
1664
|
-
}) => {
|
|
1665
|
-
let isRunning = false;
|
|
1666
|
-
let start;
|
|
1667
|
-
const pullMessages = async (options) => {
|
|
1668
|
-
const after = options.startFrom === "BEGINNING" ? 0n : options.startFrom === "END" ? await _asyncNullishCoalesce((await readLastMessageGlobalPosition(executor)).currentGlobalPosition, async () => ( 0n)) : options.startFrom.globalPosition;
|
|
1669
|
-
const readMessagesOptions = {
|
|
1670
|
-
after,
|
|
1671
|
-
batchSize
|
|
1672
|
-
};
|
|
1673
|
-
let waitTime = 100;
|
|
1674
|
-
do {
|
|
1675
|
-
const { messages, currentGlobalPosition, areEventsLeft } = await readMessagesBatch(executor, readMessagesOptions);
|
|
1676
|
-
if (messages.length > 0) {
|
|
1677
|
-
const result = await eachBatch({ messages });
|
|
1678
|
-
if (result && result.type === "STOP") {
|
|
1679
|
-
isRunning = false;
|
|
1680
|
-
break;
|
|
1681
|
-
}
|
|
1682
|
-
}
|
|
1683
|
-
readMessagesOptions.after = currentGlobalPosition;
|
|
1684
|
-
await new Promise((resolve) => setTimeout(resolve, waitTime));
|
|
1685
|
-
if (!areEventsLeft) {
|
|
1686
|
-
waitTime = Math.min(waitTime * 2, 1e3);
|
|
1687
|
-
} else {
|
|
1688
|
-
waitTime = pullingFrequencyInMs;
|
|
1689
|
-
}
|
|
1690
|
-
} while (isRunning);
|
|
1691
|
-
};
|
|
1692
|
-
return {
|
|
1693
|
-
get isRunning() {
|
|
1694
|
-
return isRunning;
|
|
1695
|
-
},
|
|
1696
|
-
start: (options) => {
|
|
1697
|
-
if (isRunning) return start;
|
|
1698
|
-
start = (async () => {
|
|
1699
|
-
isRunning = true;
|
|
1700
|
-
return pullMessages(options);
|
|
1701
|
-
})();
|
|
1702
|
-
return start;
|
|
1703
|
-
},
|
|
1704
|
-
stop: async () => {
|
|
1705
|
-
if (!isRunning) return;
|
|
1706
|
-
isRunning = false;
|
|
1707
|
-
await start;
|
|
1708
|
-
}
|
|
1709
|
-
};
|
|
1710
|
-
};
|
|
1711
|
-
var zipPostgreSQLEventStoreMessageBatchPullerStartFrom = (options) => {
|
|
1712
|
-
if (options.length === 0 || options.some((o) => o === void 0 || o === "BEGINNING"))
|
|
1713
|
-
return "BEGINNING";
|
|
1714
|
-
if (options.every((o) => o === "END")) return "END";
|
|
1715
|
-
return options.filter((o) => o !== void 0 && o !== "BEGINNING" && o !== "END").sort((a, b) => a > b ? 1 : -1)[0];
|
|
1716
|
-
};
|
|
1717
|
-
|
|
1718
|
-
// src/eventStore/subscriptions/postgreSQLEventStoreConsumer.ts
|
|
1719
|
-
|
|
1720
|
-
|
|
1721
|
-
// src/eventStore/subscriptions/postgreSQLEventStoreSubscription.ts
|
|
1722
|
-
|
|
1723
|
-
var PostgreSQLEventStoreSubscription = {
|
|
1724
|
-
result: {
|
|
1725
|
-
skip: (options) => ({
|
|
1726
|
-
type: "SKIP",
|
|
1727
|
-
..._nullishCoalesce(options, () => ( {}))
|
|
1728
|
-
}),
|
|
1729
|
-
stop: (options) => ({
|
|
1730
|
-
type: "STOP",
|
|
1731
|
-
..._nullishCoalesce(options, () => ( {}))
|
|
1732
|
-
})
|
|
1733
|
-
}
|
|
1734
|
-
};
|
|
1735
|
-
-var postgreSQLEventStoreSubscription = (options) => {
-  const { eachMessage } = options;
-  let isActive = true;
-  return {
-    id: options.subscriptionId,
-    start: async (execute) => {
-      isActive = true;
-      if (options.startFrom !== "CURRENT") return options.startFrom;
-      const { lastProcessedPosition } = await readSubscriptionCheckpoint(
-        execute,
-        {
-          subscriptionId: options.subscriptionId,
-          partition: options.partition
-        }
-      );
-      if (lastProcessedPosition === null) return "BEGINNING";
-      return { globalPosition: lastProcessedPosition };
-    },
-    get isActive() {
-      return isActive;
-    },
-    handle: async ({ messages }, { pool }) => {
-      if (!isActive) return;
-      return pool.withTransaction(async (tx) => {
-        let result = void 0;
-        let lastProcessedPosition = null;
-        for (const message of messages) {
-          const typedMessage = message;
-          const messageProcessingResult = await eachMessage(typedMessage);
-          await storeSubscriptionCheckpoint(tx.execute, {
-            subscriptionId: options.subscriptionId,
-            version: options.version,
-            lastProcessedPosition,
-            newPosition: typedMessage.metadata.globalPosition,
-            partition: options.partition
-          });
-          lastProcessedPosition = typedMessage.metadata.globalPosition;
-          if (messageProcessingResult && messageProcessingResult.type === "STOP") {
-            isActive = false;
-            result = messageProcessingResult;
-            break;
-          }
-          if (options.stopAfter && options.stopAfter(typedMessage)) {
-            isActive = false;
-            result = { type: "STOP", reason: "Stop condition reached" };
-            break;
-          }
-          if (messageProcessingResult && messageProcessingResult.type === "SKIP")
-            continue;
-        }
-        return result;
-      });
-    }
-  };
-};
-
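The removed `handle` above encodes the old checkpointing contract: each message is handled, then its `globalPosition` is stored in the same transaction via `storeSubscriptionCheckpoint`, with the previously stored position passed along as `lastProcessedPosition` (an optimistic-concurrency guard in spirit), and only then are the `STOP`/`stopAfter` checks applied. A compressed sketch of that per-batch flow, with `processMessage` standing in for the user-supplied `eachMessage` and the surrounding option names as in the diff:

// Simplified flow of the removed handler; checkpoints advance message by
// message inside one transaction, so a failed batch rolls back wholesale.
await pool.withTransaction(async (tx) => {
  let lastProcessedPosition: bigint | null = null;
  for (const message of messages) {
    const result = await processMessage(message); // stand-in for eachMessage
    await storeSubscriptionCheckpoint(tx.execute, {
      subscriptionId,
      version,
      lastProcessedPosition, // the position we believe is currently stored
      newPosition: message.metadata.globalPosition,
      partition,
    });
    lastProcessedPosition = message.metadata.globalPosition;
    if (result?.type === 'STOP') break; // stopAfter() triggers the same exit
  }
});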
-// src/eventStore/subscriptions/postgreSQLEventStoreConsumer.ts
-var postgreSQLEventStoreConsumer = (options) => {
-  let isRunning = false;
-  const { connectionString, pooling } = options;
-  const subscriptions = _nullishCoalesce(options.subscriptions, () => ( []));
-  let start;
-  let currentMessagePooler;
-  const pool = _dumbo.dumbo.call(void 0, { connectionString });
-  const eachBatch = async (messagesBatch) => {
-    const activeSubscriptions = subscriptions.filter((s) => s.isActive);
-    if (activeSubscriptions.length === 0)
-      return {
-        type: "STOP",
-        reason: "No active subscriptions"
-      };
-    const result = await Promise.allSettled(
-      activeSubscriptions.map((s) => {
-        return s.handle(messagesBatch, { pool });
-      })
-    );
-    return result.some(
-      (r) => r.status === "fulfilled" && _optionalChain([r, 'access', _40 => _40.value, 'optionalAccess', _41 => _41.type]) !== "STOP"
-    ) ? void 0 : {
-      type: "STOP"
-    };
-  };
-  const messagePooler = currentMessagePooler = postgreSQLEventStoreMessageBatchPuller({
-    executor: pool.execute,
-    eachBatch,
-    batchSize: _nullishCoalesce(_optionalChain([pooling, 'optionalAccess', _42 => _42.batchSize]), () => ( DefaultPostgreSQLEventStoreSubscriptionBatchSize)),
-    pullingFrequencyInMs: _nullishCoalesce(_optionalChain([pooling, 'optionalAccess', _43 => _43.pullingFrequencyInMs]), () => ( DefaultPostgreSQLEventStoreSubscriptionPullingFrequencyInMs))
-  });
-  const stop = async () => {
-    if (!isRunning) return;
-    isRunning = false;
-    if (currentMessagePooler) {
-      await currentMessagePooler.stop();
-      currentMessagePooler = void 0;
-    }
-    await start;
-  };
-  return {
-    connectionString,
-    subscriptions,
-    get isRunning() {
-      return isRunning;
-    },
-    subscribe: (options2) => {
-      const subscription = postgreSQLEventStoreSubscription(options2);
-      subscriptions.push(subscription);
-      return subscription;
-    },
-    start: () => {
-      if (isRunning) return start;
-      start = (async () => {
-        if (subscriptions.length === 0)
-          return Promise.reject(
-            new EmmettError(
-              "Cannot start consumer without at least a single subscription"
-            )
-          );
-        isRunning = true;
-        const startFrom = zipPostgreSQLEventStoreMessageBatchPullerStartFrom(
-          await Promise.all(subscriptions.map((o) => o.start(pool.execute)))
-        );
-        return messagePooler.start({ startFrom });
-      })();
-      return start;
-    },
-    stop,
-    close: async () => {
-      await stop();
-      await pool.close();
-    }
-  };
-};
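Pulling the removed pieces together, this is roughly how a 0.28 consumer was wired up; the connection string, subscription id, and handler body are illustrative:

import { postgreSQLEventStoreConsumer } from '@event-driven-io/emmett-postgresql';

const consumer = postgreSQLEventStoreConsumer({
  connectionString: 'postgresql://localhost:5432/postgres', // illustrative
});

consumer.subscribe({
  subscriptionId: 'my-subscription', // illustrative
  startFrom: 'BEGINNING',
  eachMessage: async (message) => {
    // project, forward, or otherwise react to the message here
  },
});

const started = consumer.start(); // rejects if no subscription was added
// ... later: close() stops pulling and closes the connection pool
await consumer.close();
await started;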
-



@@ -1922,5 +1926,5 @@ var postgreSQLEventStoreConsumer = (options) => {



-exports.
+exports.DefaultPostgreSQLEventStoreProcessorBatchSize = DefaultPostgreSQLEventStoreProcessorBatchSize; exports.DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs = DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs; exports.PostgreSQLEventStoreDefaultStreamVersion = PostgreSQLEventStoreDefaultStreamVersion; exports.PostgreSQLProcessor = PostgreSQLProcessor; exports.PostgreSQLProjectionSpec = PostgreSQLProjectionSpec; exports.addDefaultPartition = addDefaultPartition; exports.addEventsPartitions = addEventsPartitions; exports.addModuleForAllTenantsSQL = addModuleForAllTenantsSQL; exports.addModuleSQL = addModuleSQL; exports.addTablePartitions = addTablePartitions; exports.addTenantForAllModulesSQL = addTenantForAllModulesSQL; exports.addTenantSQL = addTenantSQL; exports.appendEventsSQL = appendEventsSQL; exports.appendToStream = appendToStream; exports.assertSQLQueryResultMatches = assertSQLQueryResultMatches; exports.createEventStoreSchema = createEventStoreSchema; exports.defaultPostgreSQLOptions = defaultPostgreSQLOptions; exports.defaultTag = defaultTag; exports.documentDoesNotExist = documentDoesNotExist; exports.documentExists = documentExists; exports.documentMatchingExists = documentMatchingExists; exports.documentsAreTheSame = documentsAreTheSame; exports.documentsMatchingHaveCount = documentsMatchingHaveCount; exports.emmettPrefix = emmettPrefix; exports.eventInStream = eventInStream; exports.eventsInStream = eventsInStream; exports.eventsTable = eventsTable; exports.eventsTableSQL = eventsTableSQL; exports.expectPongoDocuments = expectPongoDocuments; exports.expectSQL = expectSQL; exports.getPostgreSQLEventStore = getPostgreSQLEventStore; exports.globalNames = globalNames; exports.globalTag = globalTag; exports.handleProjections = handleProjections; exports.newEventsInStream = newEventsInStream; exports.pongoMultiStreamProjection = pongoMultiStreamProjection; exports.pongoProjection = pongoProjection; exports.pongoSingleStreamProjection = pongoSingleStreamProjection; exports.postgreSQLEventStoreConsumer = postgreSQLEventStoreConsumer; exports.postgreSQLEventStoreMessageBatchPuller = postgreSQLEventStoreMessageBatchPuller; exports.postgreSQLProcessor = postgreSQLProcessor; exports.postgreSQLProjection = postgreSQLProjection; exports.postgreSQLRawBatchSQLProjection = postgreSQLRawBatchSQLProjection; exports.postgreSQLRawSQLProjection = postgreSQLRawSQLProjection; exports.readLastMessageGlobalPosition = readLastMessageGlobalPosition; exports.readMessagesBatch = readMessagesBatch; exports.readProcessorCheckpoint = readProcessorCheckpoint; exports.readStream = readStream; exports.sanitizeNameSQL = sanitizeNameSQL; exports.schemaSQL = schemaSQL; exports.storeProcessorCheckpoint = storeProcessorCheckpoint; exports.storeSubscriptionCheckpointSQL = storeSubscriptionCheckpointSQL; exports.streamsTable = streamsTable; exports.streamsTableSQL = streamsTableSQL; exports.subscriptionsTable = subscriptionsTable; exports.subscriptionsTableSQL = subscriptionsTableSQL; exports.zipPostgreSQLEventStoreMessageBatchPullerStartFrom = zipPostgreSQLEventStoreMessageBatchPullerStartFrom;
//# sourceMappingURL=index.cjs.map
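The replaced export list is where this release's headline rename becomes visible: the 0.28 "subscription" machinery resurfaces as "processors" (`PostgreSQLProcessor`, `postgreSQLProcessor`, `readProcessorCheckpoint`, `storeProcessorCheckpoint`, and the `DefaultPostgreSQLEventStoreProcessor*` defaults), while `postgreSQLEventStoreConsumer` and the batch puller keep their names. Note that the removed 0.28 export list is truncated to `exports.` in this rendering, so only the additions can be read off here. A hedged import sketch using names confirmed by the new exports line (the diff shows the names only, not their call signatures):

import {
  postgreSQLEventStoreConsumer,
  postgreSQLProcessor,
  DefaultPostgreSQLEventStoreProcessorBatchSize,
  DefaultPostgreSQLEventStoreProcessorPullingFrequencyInMs,
  readProcessorCheckpoint,
  storeProcessorCheckpoint,
} from '@event-driven-io/emmett-postgresql';
// Only these export names are guaranteed by the diff; the processor API's
// shapes are defined elsewhere in the 0.30.0 typings (index.d.ts).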