@event-driven-io/emmett-sqlite 0.35.0 → 0.37.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -203,22 +203,6 @@ var NotifyAboutNoActiveReadersStream = (_class = class extends _webstreamspolyfi
  }
  }
  }, _class);
- var asyncRetry = async (fn, opts) => {
- if (opts === void 0 || opts.retries === 0) return fn();
- return _asyncretry2.default.call(void 0,
- async (bail) => {
- try {
- return await fn();
- } catch (error2) {
- if (_optionalChain([opts, 'optionalAccess', _10 => _10.shouldRetryError]) && !opts.shouldRetryError(error2)) {
- bail(error2);
- }
- throw error2;
- }
- },
- _nullishCoalesce(opts, () => ( { retries: 0 }))
- );
- };
  var ParseError = class extends Error {
  constructor(text) {
  super(`Cannot parse! ${text}`);
@@ -227,19 +211,41 @@ var ParseError = class extends Error {
  var JSONParser = {
  stringify: (value, options) => {
  return JSON.stringify(
- _optionalChain([options, 'optionalAccess', _11 => _11.map]) ? options.map(value) : value,
+ _optionalChain([options, 'optionalAccess', _10 => _10.map]) ? options.map(value) : value,
  //TODO: Consider adding support to DateTime and adding specific format to mark that's a bigint
  // eslint-disable-next-line @typescript-eslint/no-unsafe-return
  (_, v) => typeof v === "bigint" ? v.toString() : v
  );
  },
  parse: (text, options) => {
- const parsed = JSON.parse(text, _optionalChain([options, 'optionalAccess', _12 => _12.reviver]));
- if (_optionalChain([options, 'optionalAccess', _13 => _13.typeCheck]) && !_optionalChain([options, 'optionalAccess', _14 => _14.typeCheck, 'call', _15 => _15(parsed)]))
+ const parsed = JSON.parse(text, _optionalChain([options, 'optionalAccess', _11 => _11.reviver]));
+ if (_optionalChain([options, 'optionalAccess', _12 => _12.typeCheck]) && !_optionalChain([options, 'optionalAccess', _13 => _13.typeCheck, 'call', _14 => _14(parsed)]))
  throw new ParseError(text);
- return _optionalChain([options, 'optionalAccess', _16 => _16.map]) ? options.map(parsed) : parsed;
+ return _optionalChain([options, 'optionalAccess', _15 => _15.map]) ? options.map(parsed) : parsed;
  }
  };
+ var asyncRetry = async (fn, opts) => {
+ if (opts === void 0 || opts.retries === 0) return fn();
+ return _asyncretry2.default.call(void 0,
+ async (bail) => {
+ try {
+ const result = await fn();
+ if (_optionalChain([opts, 'optionalAccess', _16 => _16.shouldRetryResult]) && opts.shouldRetryResult(result)) {
+ throw new EmmettError(
+ `Retrying because of result: ${JSONParser.stringify(result)}`
+ );
+ }
+ return result;
+ } catch (error2) {
+ if (_optionalChain([opts, 'optionalAccess', _17 => _17.shouldRetryError]) && !opts.shouldRetryError(error2)) {
+ bail(error2);
+ }
+ throw error2;
+ }
+ },
+ _nullishCoalesce(opts, () => ( { retries: 0 }))
+ );
+ };
  var filter = (filter2) => new (0, _webstreamspolyfill.TransformStream)({
  transform(chunk, controller) {
  if (filter2(chunk)) {
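The substantive change in the two hunks above: `asyncRetry` is re-added below `JSONParser` (it now stringifies results, so it must follow that definition) and gains a `shouldRetryResult` predicate next to the existing `retries` and `shouldRetryError` options; a flagged result is turned into an `EmmettError` so that `async-retry` schedules another attempt. De-transpiled from the Sucrase helpers, a minimal TypeScript sketch of the added helper (assuming the `async-retry` package; plain `Error`/`JSON.stringify` stand in for the bundle's `EmmettError` and bigint-aware `JSONParser`) reads roughly:

    import retry from 'async-retry';

    // Trimmed-down option shape for the sketch; the shipped typings may differ.
    type AsyncRetryOptions<T> = {
      retries?: number;
      shouldRetryError?: (error: unknown) => boolean;
      shouldRetryResult?: (result: T) => boolean;
    };

    const asyncRetry = async <T>(
      fn: () => Promise<T>,
      opts?: AsyncRetryOptions<T>,
    ): Promise<T> => {
      // No options (or zero retries) short-circuits straight to the wrapped call.
      if (opts === undefined || opts.retries === 0) return fn();

      return retry(
        async (bail) => {
          try {
            const result = await fn();
            // New in this release: a result the caller flags as retriable is
            // converted into an error so async-retry schedules another attempt.
            if (opts?.shouldRetryResult && opts.shouldRetryResult(result)) {
              throw new Error(`Retrying because of result: ${JSON.stringify(result)}`);
            }
            return result;
          } catch (error) {
            // Errors the caller marks as non-retriable abort the retry loop.
            if (opts?.shouldRetryError && !opts.shouldRetryError(error)) {
              bail(error as Error);
            }
            throw error;
          }
        },
        opts ?? { retries: 0 },
      );
    };

A caller could, for example, retry a read that comes back empty: `asyncRetry(() => readBatch(), { retries: 3, shouldRetryResult: (batch) => batch.length === 0 })` (illustrative names only).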
@@ -420,7 +426,7 @@ var subscriptionsTable = {
  var appendToStream = async (db, streamName, streamType, messages, options) => {
  if (messages.length === 0) return { success: false };
  const expectedStreamVersion = toExpectedVersion(
- _optionalChain([options, 'optionalAccess', _17 => _17.expectedStreamVersion])
+ _optionalChain([options, 'optionalAccess', _18 => _18.expectedStreamVersion])
  );
  const messagesToAppend = messages.map(
  (m, i) => ({
@@ -445,7 +451,7 @@ var appendToStream = async (db, streamName, streamType, messages, options) => {
  expectedStreamVersion
  }
  );
- if (_optionalChain([options, 'optionalAccess', _18 => _18.onBeforeCommit]))
+ if (_optionalChain([options, 'optionalAccess', _19 => _19.onBeforeCommit]))
  await options.onBeforeCommit(messagesToAppend, { db });
  return result;
  });
@@ -461,7 +467,7 @@ var appendToStreamRaw = async (db, streamId, streamType, messages, options) => {
  let streamPosition;
  let globalPosition;
  try {
- let expectedStreamVersion = _nullishCoalesce(_optionalChain([options, 'optionalAccess', _19 => _19.expectedStreamVersion]), () => ( null));
+ let expectedStreamVersion = _nullishCoalesce(_optionalChain([options, 'optionalAccess', _20 => _20.expectedStreamVersion]), () => ( null));
  if (expectedStreamVersion == null) {
  expectedStreamVersion = await getLastStreamPosition(
  db,
@@ -487,7 +493,7 @@ var appendToStreamRaw = async (db, streamId, streamType, messages, options) => {
  [
  streamId,
  messages.length,
- _nullishCoalesce(_optionalChain([options, 'optionalAccess', _20 => _20.partition]), () => ( streamsTable.columns.partition)),
+ _nullishCoalesce(_optionalChain([options, 'optionalAccess', _21 => _21.partition]), () => ( streamsTable.columns.partition)),
  streamType
  ]
  );
@@ -503,7 +509,7 @@ var appendToStreamRaw = async (db, streamId, streamType, messages, options) => {
  [
  messages.length,
  streamId,
- _nullishCoalesce(_optionalChain([options, 'optionalAccess', _21 => _21.partition]), () => ( streamsTable.columns.partition))
+ _nullishCoalesce(_optionalChain([options, 'optionalAccess', _22 => _22.partition]), () => ( streamsTable.columns.partition))
  ]
  );
  }
@@ -523,10 +529,10 @@ var appendToStreamRaw = async (db, streamId, streamType, messages, options) => {
  messages,
  expectedStreamVersion,
  streamId,
- _nullishCoalesce(_optionalChain([options, 'optionalAccess', _22 => _22.partition, 'optionalAccess', _23 => _23.toString, 'call', _24 => _24()]), () => ( defaultTag))
+ _nullishCoalesce(_optionalChain([options, 'optionalAccess', _23 => _23.partition, 'optionalAccess', _24 => _24.toString, 'call', _25 => _25()]), () => ( defaultTag))
  );
  const returningIds = await db.query(sqlString, values);
- if (returningIds.length === 0 || !_optionalChain([returningIds, 'access', _25 => _25[returningIds.length - 1], 'optionalAccess', _26 => _26.global_position])) {
+ if (returningIds.length === 0 || !_optionalChain([returningIds, 'access', _26 => _26[returningIds.length - 1], 'optionalAccess', _27 => _27.global_position])) {
  throw new Error("Could not find global position");
  }
  globalPosition = BigInt(
@@ -547,14 +553,14 @@ var appendToStreamRaw = async (db, streamId, streamType, messages, options) => {
  };
  };
  var isOptimisticConcurrencyError = (error) => {
- return _optionalChain([error, 'optionalAccess', _27 => _27.errno]) !== void 0 && error.errno === 19;
+ return _optionalChain([error, 'optionalAccess', _28 => _28.errno]) !== void 0 && error.errno === 19;
  };
  async function getLastStreamPosition(db, streamId, expectedStreamVersion) {
  const result = await db.querySingle(
  `SELECT CAST(stream_position AS VARCHAR) AS stream_position FROM ${streamsTable.name} WHERE stream_id = ?`,
  [streamId]
  );
- if (_optionalChain([result, 'optionalAccess', _28 => _28.stream_position]) == null) {
+ if (_optionalChain([result, 'optionalAccess', _29 => _29.stream_position]) == null) {
  expectedStreamVersion = 0n;
  } else {
  expectedStreamVersion = BigInt(result.stream_position);
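For context on the `errno === 19` check above (only the Sucrase helper index changed in this release): 19 is SQLite's `SQLITE_CONSTRAINT` result code, which the store presumably maps to an expected-version conflict when an append collides with an already-taken stream position. A readable TypeScript equivalent of the guard, as a sketch only:

    // errno 19 === SQLITE_CONSTRAINT: a constraint violation, treated here as
    // an optimistic-concurrency failure rather than a generic append error.
    const isOptimisticConcurrencyError = (error: unknown): boolean =>
      (error as { errno?: number } | undefined)?.errno === 19;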
@@ -564,7 +570,7 @@ async function getLastStreamPosition(db, streamId, expectedStreamVersion) {
  var buildMessageInsertQuery = (messages, expectedStreamVersion, streamId, partition) => {
  const query = messages.reduce(
  (queryBuilder, message) => {
- if (_optionalChain([message, 'access', _29 => _29.metadata, 'optionalAccess', _30 => _30.streamPosition]) == null || typeof message.metadata.streamPosition !== "bigint") {
+ if (_optionalChain([message, 'access', _30 => _30.metadata, 'optionalAccess', _31 => _31.streamPosition]) == null || typeof message.metadata.streamPosition !== "bigint") {
  throw new Error("Stream position is required");
  }
  const streamPosition = BigInt(message.metadata.streamPosition) + BigInt(expectedStreamVersion);
@@ -576,7 +582,7 @@ var buildMessageInsertQuery = (messages, expectedStreamVersion, streamId, partit
  message.kind === "Event" ? "E" : "C",
  JSONParser.stringify(message.data),
  JSONParser.stringify(message.metadata),
- _nullishCoalesce(_optionalChain([expectedStreamVersion, 'optionalAccess', _31 => _31.toString, 'call', _32 => _32()]), () => ( 0)),
+ _nullishCoalesce(_optionalChain([expectedStreamVersion, 'optionalAccess', _32 => _32.toString, 'call', _33 => _33()]), () => ( 0)),
  message.type,
  message.metadata.messageId,
  false
@@ -680,7 +686,7 @@ var readLastMessageGlobalPosition = async (db, options) => {
  ORDER BY global_position
  LIMIT 1`
  ),
- [_nullishCoalesce(_optionalChain([options, 'optionalAccess', _33 => _33.partition]), () => ( defaultTag))]
+ [_nullishCoalesce(_optionalChain([options, 'optionalAccess', _34 => _34.partition]), () => ( defaultTag))]
  )
  );
  return {
@@ -703,7 +709,7 @@ var readMessagesBatch = async (db, options) => {
  ORDER BY global_position
  ${limitCondition}`
  ),
- [_nullishCoalesce(_optionalChain([options, 'optionalAccess', _34 => _34.partition]), () => ( defaultTag))]
+ [_nullishCoalesce(_optionalChain([options, 'optionalAccess', _35 => _35.partition]), () => ( defaultTag))]
  )).map((row) => {
  const rawEvent = {
  type: row.message_type,
744
750
  WHERE partition = ? AND subscription_id = ?
745
751
  LIMIT 1`
746
752
  ),
747
- [_nullishCoalesce(_optionalChain([options, 'optionalAccess', _35 => _35.partition]), () => ( defaultTag)), options.processorId]
753
+ [_nullishCoalesce(_optionalChain([options, 'optionalAccess', _36 => _36.partition]), () => ( defaultTag)), options.processorId]
748
754
  )
749
755
  );
750
756
  return {
@@ -819,12 +825,12 @@ var genericSQLiteProcessor = (options) => {
819
825
  const { eachMessage } = options;
820
826
  let isActive = true;
821
827
  const getDb = (context) => {
822
- const fileName = _nullishCoalesce(context.fileName, () => ( _optionalChain([options, 'access', _36 => _36.connectionOptions, 'optionalAccess', _37 => _37.fileName])));
828
+ const fileName = _nullishCoalesce(context.fileName, () => ( _optionalChain([options, 'access', _37 => _37.connectionOptions, 'optionalAccess', _38 => _38.fileName])));
823
829
  if (!fileName)
824
830
  throw new EmmettError(
825
831
  `SQLite processor '${options.processorId}' is missing file name. Ensure that you passed it through options`
826
832
  );
827
- const db = _nullishCoalesce(_nullishCoalesce(context.db, () => ( _optionalChain([options, 'access', _38 => _38.connectionOptions, 'optionalAccess', _39 => _39.db]))), () => ( sqliteConnection({ fileName })));
833
+ const db = _nullishCoalesce(_nullishCoalesce(context.db, () => ( _optionalChain([options, 'access', _39 => _39.connectionOptions, 'optionalAccess', _40 => _40.db]))), () => ( sqliteConnection({ fileName })));
828
834
  return { db, fileName };
829
835
  };
830
836
  return {
@@ -919,7 +925,7 @@ var sqliteEventStoreConsumer = (options) => {
919
925
  })
920
926
  );
921
927
  return result.some(
922
- (r) => r.status === "fulfilled" && _optionalChain([r, 'access', _40 => _40.value, 'optionalAccess', _41 => _41.type]) !== "STOP"
928
+ (r) => r.status === "fulfilled" && _optionalChain([r, 'access', _41 => _41.value, 'optionalAccess', _42 => _42.type]) !== "STOP"
923
929
  ) ? void 0 : {
924
930
  type: "STOP"
925
931
  };
@@ -927,8 +933,8 @@ var sqliteEventStoreConsumer = (options) => {
  const messagePooler = currentMessagePuller = sqliteEventStoreMessageBatchPuller({
  db,
  eachBatch,
- batchSize: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _42 => _42.batchSize]), () => ( DefaultSQLiteEventStoreProcessorBatchSize)),
- pullingFrequencyInMs: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _43 => _43.pullingFrequencyInMs]), () => ( DefaultSQLiteEventStoreProcessorPullingFrequencyInMs))
+ batchSize: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _43 => _43.batchSize]), () => ( DefaultSQLiteEventStoreProcessorBatchSize)),
+ pullingFrequencyInMs: _nullishCoalesce(_optionalChain([pulling, 'optionalAccess', _44 => _44.pullingFrequencyInMs]), () => ( DefaultSQLiteEventStoreProcessorPullingFrequencyInMs))
  });
  const stop = async () => {
  if (!isRunning) return;
@@ -998,7 +1004,7 @@ var getSQLiteEventStore = (options) => {
  const fileName = _nullishCoalesce(options.fileName, () => ( InMemorySQLiteDatabase));
  const isInMemory = fileName === InMemorySQLiteDatabase || fileName === InMemorySharedCacheSQLiteDatabase;
  const inlineProjections = (_nullishCoalesce(options.projections, () => ( []))).filter(({ type }) => type === "inline").map(({ projection: projection2 }) => projection2);
- const onBeforeCommitHook = _optionalChain([options, 'access', _44 => _44.hooks, 'optionalAccess', _45 => _45.onBeforeCommit]);
+ const onBeforeCommitHook = _optionalChain([options, 'access', _45 => _45.hooks, 'optionalAccess', _46 => _46.onBeforeCommit]);
  const createConnection = () => {
  if (database != null) {
  return database;
@@ -1028,7 +1034,7 @@ var getSQLiteEventStore = (options) => {
  }
  };
  if (options) {
- autoGenerateSchema = _optionalChain([options, 'access', _46 => _46.schema, 'optionalAccess', _47 => _47.autoMigration]) === void 0 || _optionalChain([options, 'access', _48 => _48.schema, 'optionalAccess', _49 => _49.autoMigration]) !== "None";
+ autoGenerateSchema = _optionalChain([options, 'access', _47 => _47.schema, 'optionalAccess', _48 => _48.autoMigration]) === void 0 || _optionalChain([options, 'access', _49 => _49.schema, 'optionalAccess', _50 => _50.autoMigration]) !== "None";
  }
  const ensureSchemaExists = async (db) => {
  if (!autoGenerateSchema) return Promise.resolve();
@@ -1041,7 +1047,7 @@ var getSQLiteEventStore = (options) => {
  return {
  async aggregateStream(streamName, options2) {
  const { evolve, initialState, read } = options2;
- const expectedStreamVersion = _optionalChain([read, 'optionalAccess', _50 => _50.expectedStreamVersion]);
+ const expectedStreamVersion = _optionalChain([read, 'optionalAccess', _51 => _51.expectedStreamVersion]);
  let state = initialState();
  if (typeof streamName !== "string") {
  throw new Error("Stream name is not string");
@@ -1093,7 +1099,7 @@ var getSQLiteEventStore = (options) => {
  throw new ExpectedVersionConflictError(
  -1n,
  //TODO: Return actual version in case of error
- _nullishCoalesce(_optionalChain([options2, 'optionalAccess', _51 => _51.expectedStreamVersion]), () => ( NO_CONCURRENCY_CHECK))
+ _nullishCoalesce(_optionalChain([options2, 'optionalAccess', _52 => _52.expectedStreamVersion]), () => ( NO_CONCURRENCY_CHECK))
  );
  return {
  nextExpectedStreamVersion: appendResult.nextStreamPosition,
@@ -1120,7 +1126,7 @@ var readStream = async (db, streamId, options) => {
  `SELECT stream_id, stream_position, global_position, message_data, message_metadata, message_schema_version, message_type, message_id
  FROM ${messagesTable.name}
  WHERE stream_id = ? AND partition = ? AND is_archived = FALSE ${fromCondition} ${toCondition}`,
- [streamId, _nullishCoalesce(_optionalChain([options, 'optionalAccess', _52 => _52.partition]), () => ( defaultTag))]
+ [streamId, _nullishCoalesce(_optionalChain([options, 'optionalAccess', _53 => _53.partition]), () => ( defaultTag))]
  );
  const messages = results.map((row) => {
  const rawEvent = {
@@ -1177,9 +1183,9 @@ async function storeSubscriptionCheckpointSQLite(db, processorId, version, posit
  [processorId, partition]
  )
  );
- if (_optionalChain([current_position, 'optionalAccess', _53 => _53.last_processed_position]) === position) {
+ if (_optionalChain([current_position, 'optionalAccess', _54 => _54.last_processed_position]) === position) {
  return 0;
- } else if (position !== null && current_position !== null && _optionalChain([current_position, 'optionalAccess', _54 => _54.last_processed_position]) > position) {
+ } else if (position !== null && current_position !== null && _optionalChain([current_position, 'optionalAccess', _55 => _55.last_processed_position]) > position) {
  return 2;
  } else {
  return 2;
@@ -1206,7 +1212,7 @@ async function storeSubscriptionCheckpointSQLite(db, processorId, version, posit
  [processorId, partition]
  )
  );
- if (_optionalChain([current, 'optionalAccess', _55 => _55.last_processed_position]) === position) {
+ if (_optionalChain([current, 'optionalAccess', _56 => _56.last_processed_position]) === position) {
  return 0;
  } else {
  return 2;