@omegup/msync 0.1.18 → 0.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. package/index.d.ts +5 -1
  2. package/index.esm.js +226 -141
  3. package/index.js +225 -139
  4. package/package.json +1 -1
package/index.d.ts CHANGED
@@ -730,8 +730,12 @@ declare const $and: Combiner;
730
730
  declare const $nor: Combiner;
731
731
  declare const $or: Combiner;
732
732
 
733
+ declare const setF: (f: ({ input }: {
734
+ input: any;
735
+ }) => Promise<void>) => void;
736
+
733
737
  declare const enablePreAndPostImages: <T extends doc>(coll: Collection<T>) => Promise<Document>;
734
738
  declare const prepare: (testName?: string) => Promise<MongoClient$1>;
735
739
  declare const makeCol: <T extends ID>(docs: readonly OptionalUnlessRequiredId<T>[], database: Db, name?: string) => Promise<Collection<T>>;
736
740
 
737
- export { $accumulator, $and, $countDict, $entries, $eq, $exists, $expr, $getField, $group, $groupId, $groupMerge, $group_, $gt, $gtTs, $gte, $gteTs, $ifNull, $in, $insert, $insertPart, $insertX, $keys, $let, $lookup, $lt, $lte, $map, $map0, $map1, $match, $matchDelta, $merge, $merge2, $mergeId, $mergePart, $merge_, $ne, $nin, $nor, $or, $outerLookup, $pushDict, $rand, $reduce, $replaceWith, $set, $simpleInsert, $simpleMerge, $simpleMergePart, $sum, $type, $unwind, $unwindDelta, type Accumulators, type Arr, type AsLiteral, type Delta, type DeltaAccumulator, type DeltaAccumulators, type ExactKeys, Expr, type ExprHKT, type Exprs, type ExprsExact, type ExprsExactHKT, type ExprsPart, Field, type ID, type Loose, Machine, type Merge, type MergeArgs, type MergeInto, type MergeMapOArgs, type Model, type MongoTypeNames, type N, type NoRaw, type NullToOBJ, type O, type OPick, type OPickD, type Patch, type RONoRaw, type RORec, type RawStages, type Rec, type Replace, type SnapshotStreamExecutionResult, type StrKey, type Strict, type TS, Type, type WriteonlyCollection, add, and, anyElementTrue, array, ceil, comp, concat, concatArray, createIndex, ctx, current, dateAdd, dateDiff, dateLt, datePart, dayAndMonthPart, divide, type doc, enablePreAndPostImages, eq, eqTyped, except, exprMapVal, field, fieldF, fieldM, filter, filterDefined, first, firstSure, floor, from, func, getWhenMatched, getWhenMatchedForMerge, gt, gte, inArray, isArray, ite, type jsonPrim, last, log, lt, lte, makeCol, map1, mapVal, max, maxDate, mergeExact, mergeExact0, mergeExpr, mergeObjects, minDate, monthPart, multiply, ne, nil, noop, not, type notArr, notNull, now, or, pair, prepare, rand, range, regex, root, set, setField, single, size, slice, sortArray, staging, startOf, str, sub, subtract, to, toInt, val, weekPart, wrap, year };
741
+ export { $accumulator, $and, $countDict, $entries, $eq, $exists, $expr, $getField, $group, $groupId, $groupMerge, $group_, $gt, $gtTs, $gte, $gteTs, $ifNull, $in, $insert, $insertPart, $insertX, $keys, $let, $lookup, $lt, $lte, $map, $map0, $map1, $match, $matchDelta, $merge, $merge2, $mergeId, $mergePart, $merge_, $ne, $nin, $nor, $or, $outerLookup, $pushDict, $rand, $reduce, $replaceWith, $set, $simpleInsert, $simpleMerge, $simpleMergePart, $sum, $type, $unwind, $unwindDelta, type Accumulators, type Arr, type AsLiteral, type Delta, type DeltaAccumulator, type DeltaAccumulators, type ExactKeys, Expr, type ExprHKT, type Exprs, type ExprsExact, type ExprsExactHKT, type ExprsPart, Field, type ID, type Loose, Machine, type Merge, type MergeArgs, type MergeInto, type MergeMapOArgs, type Model, type MongoTypeNames, type N, type NoRaw, type NullToOBJ, type O, type OPick, type OPickD, type Patch, type RONoRaw, type RORec, type RawStages, type Rec, type Replace, type SnapshotStreamExecutionResult, type StrKey, type Strict, type TS, Type, type WriteonlyCollection, add, and, anyElementTrue, array, ceil, comp, concat, concatArray, createIndex, ctx, current, dateAdd, dateDiff, dateLt, datePart, dayAndMonthPart, divide, type doc, enablePreAndPostImages, eq, eqTyped, except, exprMapVal, field, fieldF, fieldM, filter, filterDefined, first, firstSure, floor, from, func, getWhenMatched, getWhenMatchedForMerge, gt, gte, inArray, isArray, ite, type jsonPrim, last, log, lt, lte, makeCol, map1, mapVal, max, maxDate, mergeExact, mergeExact0, mergeExpr, mergeObjects, minDate, monthPart, multiply, ne, nil, noop, not, type notArr, notNull, now, or, pair, prepare, rand, range, regex, root, set, setF, setField, single, size, slice, sortArray, staging, startOf, str, sub, subtract, to, toInt, val, weekPart, wrap, year };
package/index.esm.js CHANGED
@@ -1,7 +1,7 @@
1
1
  import crypto$1 from 'crypto';
2
2
  import { canonicalize } from 'json-canonicalize';
3
3
  import { SynchronousPromise } from 'synchronous-promise';
4
- import { UUID, MongoClient } from 'mongodb';
4
+ import { MongoClient, UUID } from 'mongodb';
5
5
  import { writeFile } from 'fs/promises';
6
6
 
7
7
  const asExprRaw = (raw) => ({ get: () => raw });
@@ -1336,9 +1336,12 @@ const replace = (s) => s.replace(/\{"\$timestamp":"(\d+)"\}/g, (_, d) => T(d));
1336
1336
  const json = (a) => replace(JSON.stringify(a));
1337
1337
  const log = (...args) => console.log(new Date(), ...args.map(a => (typeof a === 'function' ? a(replace) : a && typeof a === 'object' ? json(a) : a)));
1338
1338
 
1339
- const state = { steady: false };
1339
+ const state = { steady: false, f: (_) => Promise.resolve() };
1340
1340
  let timeout = null;
1341
- const aggregate = (streamName, input, snapshot = true, start = Date.now()) => input(({ coll, input }) => {
1341
+ const setF = (f) => {
1342
+ state.f = f;
1343
+ };
1344
+ const aggregate = (db, streamName, input, snapshot = true, start = Date.now()) => input(({ coll, input }) => {
1342
1345
  const req = {
1343
1346
  aggregate: coll.collectionName,
1344
1347
  pipeline: input,
@@ -1350,9 +1353,12 @@ const aggregate = (streamName, input, snapshot = true, start = Date.now()) => in
1350
1353
  timeout = null;
1351
1354
  }
1352
1355
  log('exec', streamName, req);
1353
- return coll.s.db.command(req).then(result => {
1356
+ const start2 = Date.now();
1357
+ return db.then(d => d.command(req)).then(result => {
1358
+ log('prepare', streamName, Date.now() - start);
1359
+ log('prepare2', streamName, start2 - start);
1354
1360
  const r = result;
1355
- log('execed', streamName, (replace) => replace(JSON.stringify(req).replaceAll('$$CLUSTER_TIME', JSON.stringify(r.cursor.atClusterTime))), result, 'took', Date.now() - start);
1361
+ log('execed', streamName, (replace) => replace(JSON.stringify(req).replaceAll('"$$CLUSTER_TIME"', JSON.stringify(r.cursor.atClusterTime))), result, 'took', Date.now() - start);
1356
1362
  if (!state.steady) {
1357
1363
  if (timeout !== null)
1358
1364
  throw new Error('timeout should be null');
@@ -1555,67 +1561,129 @@ const addTeardown = (it, tr) => {
1555
1561
  };
1556
1562
  };
1557
1563
 
1558
- const changeKeys = ['fullDocument', 'fullDocumentBeforeChange'];
1559
- const subQ = (a, f) => ({ raw: g => a.raw(g.with(f)) });
1560
- const makeWatchStream = (db, { collection, projection: p, hardMatch: m }, startAt, streamName) => {
1561
- const projection = p ? { ...mapExactToObject(p, v => v), deletedAt: 1 } : 1;
1562
- const pipeline = [];
1563
- if (m) {
1564
- const q = $or(...changeKeys.map((k) => subQ(m, root().of(k))));
1565
- if (q)
1566
- pipeline.push({
1567
- $match: {
1568
- $or: [
1569
- q.raw(root()),
1570
- Object.fromEntries(changeKeys.map(k => [k, null])),
1571
- ],
1572
- },
1573
- });
1564
+ const sleep = (ms) => new Promise(r => setTimeout(r, ms));
1565
+ const getCurrentTimestamp = async (db) => {
1566
+ const adminDb = db.admin();
1567
+ const serverStatus = await adminDb.command({ serverStatus: 1 });
1568
+ return serverStatus['operationTime'];
1569
+ };
1570
+ async function getLastCommittedTs(adminDb) {
1571
+ const st = await adminDb.command({ replSetGetStatus: 1 });
1572
+ return st?.optimes?.lastCommittedOpTime?.ts ?? null;
1573
+ }
1574
+ async function waitUntilStablePast(db, oplogTs, { pollMs = 0, timeoutMs = 10_000, } = {}) {
1575
+ const adminDb = db.client.db('admin');
1576
+ const deadline = Date.now() + timeoutMs;
1577
+ while (true) {
1578
+ const stable = await getLastCommittedTs(adminDb);
1579
+ if (stable && stable.comp(oplogTs) >= 0)
1580
+ return;
1581
+ if (Date.now() > deadline) {
1582
+ throw new Error("Timed out waiting for stable timestamp to reach oplog event time");
1583
+ }
1584
+ await sleep(pollMs);
1574
1585
  }
1575
- pipeline.push({
1576
- $project: {
1577
- _id: 1,
1578
- fullDocument: projection,
1579
- fullDocumentBeforeChange: projection,
1580
- documentKey: 1,
1581
- clusterTime: 1,
1582
- },
1583
- });
1584
- pipeline.push({
1585
- $match: {
1586
- clusterTime: { $gt: startAt },
1587
- $or: [
1588
- {
1589
- $expr: {
1590
- $ne: [
1591
- { $mergeObjects: ['$fullDocument', { touchedAt: null }] },
1592
- { $mergeObjects: ['$fullDocumentBeforeChange', { touchedAt: null }] },
1593
- ],
1594
- },
1595
- },
1596
- Object.fromEntries(changeKeys.map(k => [k, null])),
1597
- ],
1598
- },
1586
+ }
1587
+ async function* tailOplog(db, opts) {
1588
+ let lastTs = opts.since ?? (await getCurrentTimestamp(db));
1589
+ const reopenDelayMs = opts.reopenDelayMs ?? 250;
1590
+ const coll = db.client.db('local').collection('oplog.rs');
1591
+ while (true) {
1592
+ const cursor = coll.find({
1593
+ ts: { $gt: lastTs },
1594
+ ns: RegExp(`^${db.namespace}\\.(?!tmp_)(?!__).*(?<!_snapshot)$`),
1595
+ op: { $in: ['i', 'u'] },
1596
+ }, {
1597
+ tailable: true,
1598
+ awaitData: true,
1599
+ noCursorTimeout: true,
1600
+ });
1601
+ try {
1602
+ for await (const doc of cursor) {
1603
+ lastTs = doc.ts;
1604
+ if (doc.op === 'i') {
1605
+ yield { ns: doc.ns, fields: new Set(Object.keys(doc.o)), doc };
1606
+ }
1607
+ else {
1608
+ if (doc.o['$v'] !== 2) {
1609
+ throw new Error(`Expected update with $v: 2, got ${JSON.stringify(doc.o)}`);
1610
+ }
1611
+ const updatedFields = [];
1612
+ const diff = doc.o['diff'];
1613
+ for (const updateOp in diff) {
1614
+ if (['u', 'i', 'd'].includes(updateOp)) {
1615
+ updatedFields.push(...Object.keys(diff[updateOp]));
1616
+ }
1617
+ else if (updateOp.startsWith('s')) {
1618
+ updatedFields.push(updateOp.slice(1));
1619
+ }
1620
+ }
1621
+ yield { ns: doc.ns, fields: new Set(updatedFields), doc };
1622
+ }
1623
+ }
1624
+ }
1625
+ catch (e) {
1626
+ log('oplog loop error', e);
1627
+ }
1628
+ finally {
1629
+ log('oplog loop ended');
1630
+ await cursor.close().catch(() => { });
1631
+ }
1632
+ await sleep(reopenDelayMs);
1633
+ }
1634
+ }
1635
+ const watchers = new Map();
1636
+ let running = false;
1637
+ const loop = async (db) => {
1638
+ log('starting oplog loop');
1639
+ for await (const { ns, fields, doc } of tailOplog(db, {})) {
1640
+ log('oplog event', ns, doc.op, [...fields]);
1641
+ const m = watchers.get(ns);
1642
+ if (!m)
1643
+ continue;
1644
+ for (const { cb, keys } of m.values()) {
1645
+ if (!keys || keys.some(k => fields.has(k))) {
1646
+ cb(doc);
1647
+ }
1648
+ }
1649
+ }
1650
+ };
1651
+ const register = (coll, keys, cb) => {
1652
+ const ns = coll.namespace;
1653
+ let m = watchers.get(ns);
1654
+ if (!m)
1655
+ watchers.set(ns, (m = new Map()));
1656
+ const id = crypto.randomUUID();
1657
+ m.set(id, { cb, keys });
1658
+ if (!running) {
1659
+ running = true;
1660
+ loop(coll.s.db);
1661
+ }
1662
+ return () => {
1663
+ m.delete(id);
1664
+ if (m.size === 0)
1665
+ watchers.delete(ns);
1666
+ };
1667
+ };
1668
+ const makeWatchStream = ({ collection, projection: p, hardMatch: m }, streamName) => {
1669
+ const projection = { ...(p ? mapExactToObject(p, v => v) : {}), deletedAt: 1 };
1670
+ let resolve = (_) => { };
1671
+ const promise = new Promise(r => (resolve = r));
1672
+ const close = register(collection, p ? Object.keys(projection) : null, (doc) => {
1673
+ log(streamName, 'change detected', doc);
1674
+ resolve(doc);
1675
+ close();
1599
1676
  });
1600
- pipeline.push({
1601
- $project: {
1602
- _id: 1,
1677
+ return {
1678
+ tryNext: async () => {
1679
+ const doc = await promise;
1680
+ const start = Date.now();
1681
+ await waitUntilStablePast(collection.s.db, doc.ts);
1682
+ log(streamName, 'stable past took', Date.now() - start);
1683
+ return doc;
1603
1684
  },
1604
- });
1605
- const stream = db.collection(collection.collectionName).watch(pipeline, {
1606
- fullDocument: 'required',
1607
- fullDocumentBeforeChange: 'required',
1608
- startAtOperationTime: startAt,
1609
- });
1610
- const tryNext = async () => {
1611
- const doc = await stream.tryNext();
1612
- if (doc)
1613
- await new Promise(resolve => setTimeout(resolve, 100));
1614
- if (doc)
1615
- log('detected', streamName, collection.collectionName, doc);
1616
- return doc;
1685
+ close: async () => close(),
1617
1686
  };
1618
- return { tryNext, close: () => stream.close() };
1619
1687
  };
1620
1688
 
1621
1689
  const actions = {
@@ -1648,9 +1716,54 @@ const getFirstStages = (view, needs) => {
1648
1716
  return { firstStages, hardMatch };
1649
1717
  };
1650
1718
 
1719
+ require('dotenv').config();
1720
+ const uri = process.env['MONGO_URL'];
1721
+
1722
+ const enablePreAndPostImages = (coll) => coll.s.db.command({
1723
+ collMod: coll.collectionName,
1724
+ changeStreamPreAndPostImages: { enabled: true },
1725
+ });
1726
+ const prepare = async (testName) => {
1727
+ const client = new MongoClient(uri, testName ? { monitorCommands: true } : {});
1728
+ if (testName) {
1729
+ const handler = (c) => {
1730
+ writeFile(`./out/${testName}.log`, JSON.stringify(c.command) + ',\n', { flag: 'w' });
1731
+ };
1732
+ client.on('commandStarted', handler);
1733
+ client.on('commandSucceeded', handler);
1734
+ }
1735
+ await client.connect();
1736
+ await client.db('admin').command({
1737
+ setClusterParameter: {
1738
+ changeStreamOptions: {
1739
+ preAndPostImages: { expireAfterSeconds: 60 },
1740
+ },
1741
+ },
1742
+ });
1743
+ return client;
1744
+ };
1745
+ const makeCol = async (docs, database, name) => {
1746
+ if (!name) {
1747
+ (name = crypto.randomUUID());
1748
+ }
1749
+ try {
1750
+ const col = await database.createCollection(name, {
1751
+ changeStreamPreAndPostImages: { enabled: true },
1752
+ });
1753
+ if (docs.length)
1754
+ await col.insertMany([...docs]);
1755
+ return col;
1756
+ }
1757
+ catch {
1758
+ return database.collection(name);
1759
+ }
1760
+ };
1761
+
1651
1762
  const streamNames = {};
1652
1763
  const executes$2 = (view, input, streamName, skip = false, after, needs = {}) => {
1653
1764
  const { collection, projection, match } = view;
1765
+ const client = prepare();
1766
+ const pdb = client.then(cl => cl.db(collection.dbName));
1654
1767
  const { firstStages, hardMatch } = getFirstStages(view, needs);
1655
1768
  const db = collection.s.db, coll = collection.collectionName;
1656
1769
  const hash = crypto$1
@@ -1673,10 +1786,22 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1673
1786
  : {}).catch(e => e.code == 86 || Promise.reject(e));
1674
1787
  const last = db.collection('__last');
1675
1788
  const snapshotCollection = db.collection(coll + '_' + streamName + '_snapshot');
1789
+ createIndex(snapshotCollection, { before: 1 }, {
1790
+ partialFilterExpression: { before: null },
1791
+ name: 'before_' + new UUID().toString('base64'),
1792
+ });
1676
1793
  createIndex(snapshotCollection, { updated: 1 }, {
1677
1794
  partialFilterExpression: { updated: true },
1678
1795
  name: 'updated_' + new UUID().toString('base64'),
1679
1796
  });
1797
+ createIndex(snapshotCollection, { updated: 1, after: 1, before: 1 }, {
1798
+ partialFilterExpression: { updated: true, after: null, before: null },
1799
+ name: 'updated_nulls_' + new UUID().toString('base64'),
1800
+ });
1801
+ createIndex(snapshotCollection, { updated: 1, after: 1 }, {
1802
+ partialFilterExpression: { updated: true, after: null },
1803
+ name: 'updated_no_after_' + new UUID().toString('base64'),
1804
+ });
1680
1805
  createIndex(snapshotCollection, { updated: 1 }, {
1681
1806
  partialFilterExpression: { updated: true, after: null, before: null },
1682
1807
  name: 'updated_nulls_' + new UUID().toString('base64'),
@@ -1715,7 +1840,7 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1715
1840
  return next(step2, 'get last update');
1716
1841
  };
1717
1842
  const step2 = () => Promise.all([
1718
- last.findOne({ _id: streamName, data }),
1843
+ last.findOne({ _id: streamName, data, job: null }),
1719
1844
  last.findOne({ _id: streamName }),
1720
1845
  ]).then(ts => next(step2_5(ts), ts[0]
1721
1846
  ? `no teardown to handle, starting at ${ts[0].ts}`
@@ -1767,40 +1892,46 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1767
1892
  whenMatched: link().with($replaceWith_(ite(eq(root().of('before').expr())(ctx()('new').of('after').expr()), root().expr(), mergeObjects(root().expr(), ctx()('new').expr())))).stages,
1768
1893
  whenNotMatched: 'insert',
1769
1894
  })).stages;
1770
- const r = await aggregate(streamName, c => c({ coll: collection, input: cloneIntoNew }));
1771
- await snapshotCollection.deleteMany({ updated: true, after: null, before: null });
1895
+ const r = await aggregate(pdb, streamName, c => c({ coll: collection, input: cloneIntoNew }));
1896
+ const start = Date.now();
1897
+ const res = await snapshotCollection.deleteMany({ updated: true, after: null, before: null });
1898
+ log('deleting from cloned into new collection', Date.now() - start, res, `db['${snapshotCollection.collectionName}'].deleteMany({ updated: true, after: null, before: null })`);
1772
1899
  return next(step4({ result: r, ts: lastTS?.ts }), 'run the aggregation');
1773
1900
  };
1774
- const makeStream = (startAt) => makeWatchStream(db, view, startAt, streamName);
1901
+ const makeStream = () => makeWatchStream(view, streamName);
1775
1902
  const step4 = ({ result, ts }) => async () => {
1776
1903
  const start = Date.now();
1777
- await snapshotCollection.updateMany({ before: null }, { $set: { before: null } });
1778
- const stages = finalInput.raw(ts === undefined);
1779
- const aggResult = await aggregate(streamName, c => c({
1904
+ log('snapshot', streamName, 'ensure before null', Date.now() - start);
1905
+ const first = ts === undefined;
1906
+ const stages = finalInput.raw(first);
1907
+ await last.updateOne({ _id: streamName }, { $set: { job: 1 } }, { upsert: true });
1908
+ const stream = makeStream();
1909
+ const aggResult = await aggregate(pdb, streamName, c => c({
1780
1910
  coll: snapshotCollection,
1781
1911
  input: link()
1782
1912
  .with($match_(root().of('updated').has($eq(true))))
1913
+ .with($set_(set()({
1914
+ before: [
1915
+ 'before',
1916
+ to($ifNull(root().of('before').expr(), nil)),
1917
+ ],
1918
+ })))
1783
1919
  .with(input.delta)
1784
1920
  .with(stages).stages,
1785
1921
  }), false, start);
1786
- const stream = makeStream(result.cursor.atClusterTime);
1787
1922
  const nextRes = stream.tryNext();
1788
- const intoColl = stages.at(-1).$merge.into.coll;
1789
- const startx = Date.now();
1790
- await db
1791
- .collection(intoColl)
1792
- .countDocuments({ touchedAt: { $gte: result.cursor.atClusterTime } })
1793
- .then(count => log(`documents updated ${intoColl}`, count, 'took', Date.now() - startx));
1794
- return next(step5({ ts: result.cursor.atClusterTime, aggResult, stream, nextRes }), 'remove handled deleted updated', () => stream.close());
1923
+ stages.at(-1).$merge.into.coll;
1924
+ return next(step5({ ts: result.cursor.atClusterTime, aggResult, stream, nextRes, first }), 'remove handled deleted updated', () => stream.close());
1795
1925
  };
1796
1926
  const step5 = (l) => async () => {
1797
- log(`remove handled deleted updated db['${snapshotCollection.collectionName}'].deleteMany({ updated: true, after: null })`);
1927
+ log(streamName, `remove handled deleted updated db['${snapshotCollection.collectionName}'].deleteMany({ updated: true, after: null })`);
1798
1928
  await snapshotCollection.deleteMany({ updated: true, after: null });
1799
1929
  log('removed handled deleted updated');
1800
1930
  return next(step6(l), 'update snapshot aggregation');
1801
1931
  };
1802
1932
  const step6 = (l) => async () => {
1803
1933
  log('update snapshot aggregation', `db['${snapshotCollection.collectionName}'].updateMany({ updated: true }, [ { $set: { updated: false, after: null, before: '$after' } } ])`);
1934
+ const start = Date.now();
1804
1935
  await snapshotCollection.updateMany({ updated: true }, [
1805
1936
  {
1806
1937
  $set: {
@@ -1810,25 +1941,25 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1810
1941
  },
1811
1942
  },
1812
1943
  ]);
1813
- log('updated snapshot aggregation');
1944
+ log('updated snapshot aggregation', Date.now() - start);
1814
1945
  return next(step7(l), 'update __last');
1815
1946
  };
1816
1947
  const step7 = (l) => async () => {
1817
- await last.updateOne({ _id: streamName }, {
1948
+ const start = Date.now();
1949
+ const patch = {
1818
1950
  $set: {
1819
1951
  ts: l.ts,
1820
- data,
1952
+ job: null,
1821
1953
  },
1822
- }, { upsert: true });
1954
+ };
1955
+ if (l.ts)
1956
+ patch.$set = data;
1957
+ await last.updateOne({ _id: streamName }, patch, { upsert: true });
1958
+ log('updated __last', Date.now() - start, `db['${last.collectionName}'].updateOne({ _id: '${streamName}' }, `, patch, `, { upsert: true })`);
1823
1959
  return step8(l);
1824
1960
  };
1825
1961
  const step8 = (l) => {
1826
- return nextData(l.aggResult.cursor.firstBatch)(() => l.nextRes
1827
- .catch((err) => {
1828
- log('restarting', err);
1829
- return { ts: null };
1830
- })
1831
- .then(doc => doc
1962
+ return nextData(l.aggResult.cursor.firstBatch)(() => l.nextRes.then(doc => doc
1832
1963
  ? next(step3({ _id: streamName, ts: l.ts }), 'restart')
1833
1964
  : step8({ ...l, nextRes: l.stream.tryNext() })), 'wait for change');
1834
1965
  };
@@ -1858,6 +1989,8 @@ const executes$1 = (view, input, streamName, needs) => {
1858
1989
  else if (streamNames[streamName] != hash)
1859
1990
  throw new Error('streamName already used');
1860
1991
  const { collection, projection, hardMatch: pre, match } = view;
1992
+ const client = prepare();
1993
+ const pdb = client.then(cl => cl.db(collection.dbName));
1861
1994
  const removeNotYetSynchronizedFields = projection &&
1862
1995
  Object.values(mapExactToObject(projection, (_, k) => (needs[k] ?? k.startsWith('_')) ? root().of(k).has($exists(true)) : null));
1863
1996
  const hardMatch = removeNotYetSynchronizedFields
@@ -1939,14 +2072,14 @@ const executes$1 = (view, input, streamName, needs) => {
1939
2072
  info: { debug: 'wait for clone into new collection', job: undefined },
1940
2073
  };
1941
2074
  };
1942
- const makeStream = (startAt) => makeWatchStream(db, view, startAt, streamName);
2075
+ const makeStream = () => makeWatchStream(view, streamName);
1943
2076
  const step4 = (lastTS) => async () => {
1944
2077
  const raw = stages(lastTS).with(finalInput.raw(lastTS === null)).stages;
1945
- const aggResult = await aggregate(streamName, c => c({
2078
+ const stream = makeStream();
2079
+ const aggResult = await aggregate(pdb, streamName, c => c({
1946
2080
  coll: collection,
1947
2081
  input: raw,
1948
2082
  }));
1949
- const stream = makeStream(aggResult.cursor.atClusterTime);
1950
2083
  const nextRes = stream.tryNext();
1951
2084
  return next(step7({ aggResult, ts: aggResult.cursor.atClusterTime, stream, nextRes }), 'update __last', () => stream.close());
1952
2085
  };
@@ -1958,12 +2091,7 @@ const executes$1 = (view, input, streamName, needs) => {
1958
2091
  return {
1959
2092
  data: l.aggResult.cursor.firstBatch,
1960
2093
  info: { job: undefined, debug: 'wait for change' },
1961
- cont: withStop(() => l.nextRes
1962
- .catch((err) => {
1963
- log('restarting', err);
1964
- return { ts: null };
1965
- })
1966
- .then(doc => doc
2094
+ cont: withStop(() => l.nextRes.then(doc => doc
1967
2095
  ? next(step4({ _id: streamName, ts: l.ts }), 'restart')
1968
2096
  : step8({ ...l, nextRes: l.stream.tryNext() }))),
1969
2097
  };
@@ -1992,47 +2120,4 @@ const executes = (view, input, needs) => {
1992
2120
  };
1993
2121
  const single = (view, needs = {}) => pipe(input => executes(view, input, needs), emptyDelta(), concatDelta, emptyDelta);
1994
2122
 
1995
- require('dotenv').config();
1996
- const uri = process.env['MONGO_URL'];
1997
-
1998
- const enablePreAndPostImages = (coll) => coll.s.db.command({
1999
- collMod: coll.collectionName,
2000
- changeStreamPreAndPostImages: { enabled: true },
2001
- });
2002
- const prepare = async (testName) => {
2003
- const client = new MongoClient(uri, testName ? { monitorCommands: true } : {});
2004
- if (testName) {
2005
- const handler = (c) => {
2006
- writeFile(`./out/${testName}.log`, JSON.stringify(c.command) + ',\n', { flag: 'w' });
2007
- };
2008
- client.on('commandStarted', handler);
2009
- client.on('commandSucceeded', handler);
2010
- }
2011
- await client.connect();
2012
- await client.db('admin').command({
2013
- setClusterParameter: {
2014
- changeStreamOptions: {
2015
- preAndPostImages: { expireAfterSeconds: 60 },
2016
- },
2017
- },
2018
- });
2019
- return client;
2020
- };
2021
- const makeCol = async (docs, database, name) => {
2022
- if (!name) {
2023
- (name = crypto.randomUUID());
2024
- }
2025
- try {
2026
- const col = await database.createCollection(name, {
2027
- changeStreamPreAndPostImages: { enabled: true },
2028
- });
2029
- if (docs.length)
2030
- await col.insertMany([...docs]);
2031
- return col;
2032
- }
2033
- catch {
2034
- return database.collection(name);
2035
- }
2036
- };
2037
-
2038
- export { $accumulator, $and, $countDict, $entries, $eq, $exists, $expr, $getField, $group, $groupId, $groupMerge, $group_, $gt, $gtTs, $gte, $gteTs, $ifNull, $in, $insert, $insertPart, $insertX, $keys, $let, $lookup, $lt, $lte, $map, $map0, $map1, $match, $matchDelta, $merge, $merge2, $mergeId, $mergePart, $merge_, $ne, $nin, $nor, $or, $outerLookup, $pushDict, $rand, $reduce, $replaceWith, $set, $simpleInsert, $simpleMerge, $simpleMergePart, $sum, $type, $unwind, $unwindDelta, Field, Machine, add, and, anyElementTrue, array, ceil, comp, concat$1 as concat, concatArray, createIndex, ctx, current, dateAdd, dateDiff, dateLt, datePart, dayAndMonthPart, divide, enablePreAndPostImages, eq, eqTyped, except, exprMapVal, field, fieldF, fieldM, filter, filterDefined, first$1 as first, firstSure, floor, from, func, getWhenMatched, getWhenMatchedForMerge, gt, gte, inArray, isArray, ite, last, log, lt, lte, makeCol, map1, mapVal, max, maxDate, mergeExact, mergeExact0, mergeExpr, mergeObjects, minDate, monthPart, multiply, ne, nil, noop, not, notNull, now, or, pair, prepare, rand, range, regex, root, set, setField, single, size, slice, sortArray, staging, startOf, str, sub, subtract, to, toInt, val, weekPart, wrap, year };
2123
+ export { $accumulator, $and, $countDict, $entries, $eq, $exists, $expr, $getField, $group, $groupId, $groupMerge, $group_, $gt, $gtTs, $gte, $gteTs, $ifNull, $in, $insert, $insertPart, $insertX, $keys, $let, $lookup, $lt, $lte, $map, $map0, $map1, $match, $matchDelta, $merge, $merge2, $mergeId, $mergePart, $merge_, $ne, $nin, $nor, $or, $outerLookup, $pushDict, $rand, $reduce, $replaceWith, $set, $simpleInsert, $simpleMerge, $simpleMergePart, $sum, $type, $unwind, $unwindDelta, Field, Machine, add, and, anyElementTrue, array, ceil, comp, concat$1 as concat, concatArray, createIndex, ctx, current, dateAdd, dateDiff, dateLt, datePart, dayAndMonthPart, divide, enablePreAndPostImages, eq, eqTyped, except, exprMapVal, field, fieldF, fieldM, filter, filterDefined, first$1 as first, firstSure, floor, from, func, getWhenMatched, getWhenMatchedForMerge, gt, gte, inArray, isArray, ite, last, log, lt, lte, makeCol, map1, mapVal, max, maxDate, mergeExact, mergeExact0, mergeExpr, mergeObjects, minDate, monthPart, multiply, ne, nil, noop, not, notNull, now, or, pair, prepare, rand, range, regex, root, set, setF, setField, single, size, slice, sortArray, staging, startOf, str, sub, subtract, to, toInt, val, weekPart, wrap, year };
package/index.js CHANGED
@@ -1338,9 +1338,12 @@ const replace = (s) => s.replace(/\{"\$timestamp":"(\d+)"\}/g, (_, d) => T(d));
1338
1338
  const json = (a) => replace(JSON.stringify(a));
1339
1339
  const log = (...args) => console.log(new Date(), ...args.map(a => (typeof a === 'function' ? a(replace) : a && typeof a === 'object' ? json(a) : a)));
1340
1340
 
1341
- const state = { steady: false };
1341
+ const state = { steady: false, f: (_) => Promise.resolve() };
1342
1342
  let timeout = null;
1343
- const aggregate = (streamName, input, snapshot = true, start = Date.now()) => input(({ coll, input }) => {
1343
+ const setF = (f) => {
1344
+ state.f = f;
1345
+ };
1346
+ const aggregate = (db, streamName, input, snapshot = true, start = Date.now()) => input(({ coll, input }) => {
1344
1347
  const req = {
1345
1348
  aggregate: coll.collectionName,
1346
1349
  pipeline: input,
@@ -1352,9 +1355,12 @@ const aggregate = (streamName, input, snapshot = true, start = Date.now()) => in
1352
1355
  timeout = null;
1353
1356
  }
1354
1357
  log('exec', streamName, req);
1355
- return coll.s.db.command(req).then(result => {
1358
+ const start2 = Date.now();
1359
+ return db.then(d => d.command(req)).then(result => {
1360
+ log('prepare', streamName, Date.now() - start);
1361
+ log('prepare2', streamName, start2 - start);
1356
1362
  const r = result;
1357
- log('execed', streamName, (replace) => replace(JSON.stringify(req).replaceAll('$$CLUSTER_TIME', JSON.stringify(r.cursor.atClusterTime))), result, 'took', Date.now() - start);
1363
+ log('execed', streamName, (replace) => replace(JSON.stringify(req).replaceAll('"$$CLUSTER_TIME"', JSON.stringify(r.cursor.atClusterTime))), result, 'took', Date.now() - start);
1358
1364
  if (!state.steady) {
1359
1365
  if (timeout !== null)
1360
1366
  throw new Error('timeout should be null');
@@ -1557,67 +1563,129 @@ const addTeardown = (it, tr) => {
1557
1563
  };
1558
1564
  };
1559
1565
 
1560
- const changeKeys = ['fullDocument', 'fullDocumentBeforeChange'];
1561
- const subQ = (a, f) => ({ raw: g => a.raw(g.with(f)) });
1562
- const makeWatchStream = (db, { collection, projection: p, hardMatch: m }, startAt, streamName) => {
1563
- const projection = p ? { ...mapExactToObject(p, v => v), deletedAt: 1 } : 1;
1564
- const pipeline = [];
1565
- if (m) {
1566
- const q = $or(...changeKeys.map((k) => subQ(m, root().of(k))));
1567
- if (q)
1568
- pipeline.push({
1569
- $match: {
1570
- $or: [
1571
- q.raw(root()),
1572
- Object.fromEntries(changeKeys.map(k => [k, null])),
1573
- ],
1574
- },
1575
- });
1566
+ const sleep = (ms) => new Promise(r => setTimeout(r, ms));
1567
+ const getCurrentTimestamp = async (db) => {
1568
+ const adminDb = db.admin();
1569
+ const serverStatus = await adminDb.command({ serverStatus: 1 });
1570
+ return serverStatus['operationTime'];
1571
+ };
1572
+ async function getLastCommittedTs(adminDb) {
1573
+ const st = await adminDb.command({ replSetGetStatus: 1 });
1574
+ return st?.optimes?.lastCommittedOpTime?.ts ?? null;
1575
+ }
1576
+ async function waitUntilStablePast(db, oplogTs, { pollMs = 0, timeoutMs = 10_000, } = {}) {
1577
+ const adminDb = db.client.db('admin');
1578
+ const deadline = Date.now() + timeoutMs;
1579
+ while (true) {
1580
+ const stable = await getLastCommittedTs(adminDb);
1581
+ if (stable && stable.comp(oplogTs) >= 0)
1582
+ return;
1583
+ if (Date.now() > deadline) {
1584
+ throw new Error("Timed out waiting for stable timestamp to reach oplog event time");
1585
+ }
1586
+ await sleep(pollMs);
1576
1587
  }
1577
- pipeline.push({
1578
- $project: {
1579
- _id: 1,
1580
- fullDocument: projection,
1581
- fullDocumentBeforeChange: projection,
1582
- documentKey: 1,
1583
- clusterTime: 1,
1584
- },
1585
- });
1586
- pipeline.push({
1587
- $match: {
1588
- clusterTime: { $gt: startAt },
1589
- $or: [
1590
- {
1591
- $expr: {
1592
- $ne: [
1593
- { $mergeObjects: ['$fullDocument', { touchedAt: null }] },
1594
- { $mergeObjects: ['$fullDocumentBeforeChange', { touchedAt: null }] },
1595
- ],
1596
- },
1597
- },
1598
- Object.fromEntries(changeKeys.map(k => [k, null])),
1599
- ],
1600
- },
1588
+ }
1589
+ async function* tailOplog(db, opts) {
1590
+ let lastTs = opts.since ?? (await getCurrentTimestamp(db));
1591
+ const reopenDelayMs = opts.reopenDelayMs ?? 250;
1592
+ const coll = db.client.db('local').collection('oplog.rs');
1593
+ while (true) {
1594
+ const cursor = coll.find({
1595
+ ts: { $gt: lastTs },
1596
+ ns: RegExp(`^${db.namespace}\\.(?!tmp_)(?!__).*(?<!_snapshot)$`),
1597
+ op: { $in: ['i', 'u'] },
1598
+ }, {
1599
+ tailable: true,
1600
+ awaitData: true,
1601
+ noCursorTimeout: true,
1602
+ });
1603
+ try {
1604
+ for await (const doc of cursor) {
1605
+ lastTs = doc.ts;
1606
+ if (doc.op === 'i') {
1607
+ yield { ns: doc.ns, fields: new Set(Object.keys(doc.o)), doc };
1608
+ }
1609
+ else {
1610
+ if (doc.o['$v'] !== 2) {
1611
+ throw new Error(`Expected update with $v: 2, got ${JSON.stringify(doc.o)}`);
1612
+ }
1613
+ const updatedFields = [];
1614
+ const diff = doc.o['diff'];
1615
+ for (const updateOp in diff) {
1616
+ if (['u', 'i', 'd'].includes(updateOp)) {
1617
+ updatedFields.push(...Object.keys(diff[updateOp]));
1618
+ }
1619
+ else if (updateOp.startsWith('s')) {
1620
+ updatedFields.push(updateOp.slice(1));
1621
+ }
1622
+ }
1623
+ yield { ns: doc.ns, fields: new Set(updatedFields), doc };
1624
+ }
1625
+ }
1626
+ }
1627
+ catch (e) {
1628
+ log('oplog loop error', e);
1629
+ }
1630
+ finally {
1631
+ log('oplog loop ended');
1632
+ await cursor.close().catch(() => { });
1633
+ }
1634
+ await sleep(reopenDelayMs);
1635
+ }
1636
+ }
1637
+ const watchers = new Map();
1638
+ let running = false;
1639
+ const loop = async (db) => {
1640
+ log('starting oplog loop');
1641
+ for await (const { ns, fields, doc } of tailOplog(db, {})) {
1642
+ log('oplog event', ns, doc.op, [...fields]);
1643
+ const m = watchers.get(ns);
1644
+ if (!m)
1645
+ continue;
1646
+ for (const { cb, keys } of m.values()) {
1647
+ if (!keys || keys.some(k => fields.has(k))) {
1648
+ cb(doc);
1649
+ }
1650
+ }
1651
+ }
1652
+ };
1653
+ const register = (coll, keys, cb) => {
1654
+ const ns = coll.namespace;
1655
+ let m = watchers.get(ns);
1656
+ if (!m)
1657
+ watchers.set(ns, (m = new Map()));
1658
+ const id = crypto.randomUUID();
1659
+ m.set(id, { cb, keys });
1660
+ if (!running) {
1661
+ running = true;
1662
+ loop(coll.s.db);
1663
+ }
1664
+ return () => {
1665
+ m.delete(id);
1666
+ if (m.size === 0)
1667
+ watchers.delete(ns);
1668
+ };
1669
+ };
1670
+ const makeWatchStream = ({ collection, projection: p, hardMatch: m }, streamName) => {
1671
+ const projection = { ...(p ? mapExactToObject(p, v => v) : {}), deletedAt: 1 };
1672
+ let resolve = (_) => { };
1673
+ const promise = new Promise(r => (resolve = r));
1674
+ const close = register(collection, p ? Object.keys(projection) : null, (doc) => {
1675
+ log(streamName, 'change detected', doc);
1676
+ resolve(doc);
1677
+ close();
1601
1678
  });
1602
- pipeline.push({
1603
- $project: {
1604
- _id: 1,
1679
+ return {
1680
+ tryNext: async () => {
1681
+ const doc = await promise;
1682
+ const start = Date.now();
1683
+ await waitUntilStablePast(collection.s.db, doc.ts);
1684
+ log(streamName, 'stable past took', Date.now() - start);
1685
+ return doc;
1605
1686
  },
1606
- });
1607
- const stream = db.collection(collection.collectionName).watch(pipeline, {
1608
- fullDocument: 'required',
1609
- fullDocumentBeforeChange: 'required',
1610
- startAtOperationTime: startAt,
1611
- });
1612
- const tryNext = async () => {
1613
- const doc = await stream.tryNext();
1614
- if (doc)
1615
- await new Promise(resolve => setTimeout(resolve, 100));
1616
- if (doc)
1617
- log('detected', streamName, collection.collectionName, doc);
1618
- return doc;
1687
+ close: async () => close(),
1619
1688
  };
1620
- return { tryNext, close: () => stream.close() };
1621
1689
  };
1622
1690
 
1623
1691
  const actions = {
@@ -1650,9 +1718,54 @@ const getFirstStages = (view, needs) => {
1650
1718
  return { firstStages, hardMatch };
1651
1719
  };
1652
1720
 
1721
+ require('dotenv').config();
1722
+ const uri = process.env['MONGO_URL'];
1723
+
1724
+ const enablePreAndPostImages = (coll) => coll.s.db.command({
1725
+ collMod: coll.collectionName,
1726
+ changeStreamPreAndPostImages: { enabled: true },
1727
+ });
1728
+ const prepare = async (testName) => {
1729
+ const client = new mongodb.MongoClient(uri, testName ? { monitorCommands: true } : {});
1730
+ if (testName) {
1731
+ const handler = (c) => {
1732
+ promises.writeFile(`./out/${testName}.log`, JSON.stringify(c.command) + ',\n', { flag: 'w' });
1733
+ };
1734
+ client.on('commandStarted', handler);
1735
+ client.on('commandSucceeded', handler);
1736
+ }
1737
+ await client.connect();
1738
+ await client.db('admin').command({
1739
+ setClusterParameter: {
1740
+ changeStreamOptions: {
1741
+ preAndPostImages: { expireAfterSeconds: 60 },
1742
+ },
1743
+ },
1744
+ });
1745
+ return client;
1746
+ };
1747
+ const makeCol = async (docs, database, name) => {
1748
+ if (!name) {
1749
+ (name = crypto.randomUUID());
1750
+ }
1751
+ try {
1752
+ const col = await database.createCollection(name, {
1753
+ changeStreamPreAndPostImages: { enabled: true },
1754
+ });
1755
+ if (docs.length)
1756
+ await col.insertMany([...docs]);
1757
+ return col;
1758
+ }
1759
+ catch {
1760
+ return database.collection(name);
1761
+ }
1762
+ };
1763
+
1653
1764
  const streamNames = {};
1654
1765
  const executes$2 = (view, input, streamName, skip = false, after, needs = {}) => {
1655
1766
  const { collection, projection, match } = view;
1767
+ const client = prepare();
1768
+ const pdb = client.then(cl => cl.db(collection.dbName));
1656
1769
  const { firstStages, hardMatch } = getFirstStages(view, needs);
1657
1770
  const db = collection.s.db, coll = collection.collectionName;
1658
1771
  const hash = crypto$1
@@ -1675,10 +1788,22 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1675
1788
  : {}).catch(e => e.code == 86 || Promise.reject(e));
1676
1789
  const last = db.collection('__last');
1677
1790
  const snapshotCollection = db.collection(coll + '_' + streamName + '_snapshot');
1791
+ createIndex(snapshotCollection, { before: 1 }, {
1792
+ partialFilterExpression: { before: null },
1793
+ name: 'before_' + new mongodb.UUID().toString('base64'),
1794
+ });
1678
1795
  createIndex(snapshotCollection, { updated: 1 }, {
1679
1796
  partialFilterExpression: { updated: true },
1680
1797
  name: 'updated_' + new mongodb.UUID().toString('base64'),
1681
1798
  });
1799
+ createIndex(snapshotCollection, { updated: 1, after: 1, before: 1 }, {
1800
+ partialFilterExpression: { updated: true, after: null, before: null },
1801
+ name: 'updated_nulls_' + new mongodb.UUID().toString('base64'),
1802
+ });
1803
+ createIndex(snapshotCollection, { updated: 1, after: 1 }, {
1804
+ partialFilterExpression: { updated: true, after: null },
1805
+ name: 'updated_no_after_' + new mongodb.UUID().toString('base64'),
1806
+ });
1682
1807
  createIndex(snapshotCollection, { updated: 1 }, {
1683
1808
  partialFilterExpression: { updated: true, after: null, before: null },
1684
1809
  name: 'updated_nulls_' + new mongodb.UUID().toString('base64'),
@@ -1717,7 +1842,7 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1717
1842
  return next(step2, 'get last update');
1718
1843
  };
1719
1844
  const step2 = () => Promise.all([
1720
- last.findOne({ _id: streamName, data }),
1845
+ last.findOne({ _id: streamName, data, job: null }),
1721
1846
  last.findOne({ _id: streamName }),
1722
1847
  ]).then(ts => next(step2_5(ts), ts[0]
1723
1848
  ? `no teardown to handle, starting at ${ts[0].ts}`
@@ -1769,40 +1894,46 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1769
1894
  whenMatched: link().with($replaceWith_(ite(eq(root().of('before').expr())(ctx()('new').of('after').expr()), root().expr(), mergeObjects(root().expr(), ctx()('new').expr())))).stages,
1770
1895
  whenNotMatched: 'insert',
1771
1896
  })).stages;
1772
- const r = await aggregate(streamName, c => c({ coll: collection, input: cloneIntoNew }));
1773
- await snapshotCollection.deleteMany({ updated: true, after: null, before: null });
1897
+ const r = await aggregate(pdb, streamName, c => c({ coll: collection, input: cloneIntoNew }));
1898
+ const start = Date.now();
1899
+ const res = await snapshotCollection.deleteMany({ updated: true, after: null, before: null });
1900
+ log('deleting from cloned into new collection', Date.now() - start, res, `db['${snapshotCollection.collectionName}'].deleteMany({ updated: true, after: null, before: null })`);
1774
1901
  return next(step4({ result: r, ts: lastTS?.ts }), 'run the aggregation');
1775
1902
  };
1776
- const makeStream = (startAt) => makeWatchStream(db, view, startAt, streamName);
1903
+ const makeStream = () => makeWatchStream(view, streamName);
1777
1904
  const step4 = ({ result, ts }) => async () => {
1778
1905
  const start = Date.now();
1779
- await snapshotCollection.updateMany({ before: null }, { $set: { before: null } });
1780
- const stages = finalInput.raw(ts === undefined);
1781
- const aggResult = await aggregate(streamName, c => c({
1906
+ log('snapshot', streamName, 'ensure before null', Date.now() - start);
1907
+ const first = ts === undefined;
1908
+ const stages = finalInput.raw(first);
1909
+ await last.updateOne({ _id: streamName }, { $set: { job: 1 } }, { upsert: true });
1910
+ const stream = makeStream();
1911
+ const aggResult = await aggregate(pdb, streamName, c => c({
1782
1912
  coll: snapshotCollection,
1783
1913
  input: link()
1784
1914
  .with($match_(root().of('updated').has($eq(true))))
1915
+ .with($set_(set()({
1916
+ before: [
1917
+ 'before',
1918
+ to($ifNull(root().of('before').expr(), nil)),
1919
+ ],
1920
+ })))
1785
1921
  .with(input.delta)
1786
1922
  .with(stages).stages,
1787
1923
  }), false, start);
1788
- const stream = makeStream(result.cursor.atClusterTime);
1789
1924
  const nextRes = stream.tryNext();
1790
- const intoColl = stages.at(-1).$merge.into.coll;
1791
- const startx = Date.now();
1792
- await db
1793
- .collection(intoColl)
1794
- .countDocuments({ touchedAt: { $gte: result.cursor.atClusterTime } })
1795
- .then(count => log(`documents updated ${intoColl}`, count, 'took', Date.now() - startx));
1796
- return next(step5({ ts: result.cursor.atClusterTime, aggResult, stream, nextRes }), 'remove handled deleted updated', () => stream.close());
1925
+ stages.at(-1).$merge.into.coll;
1926
+ return next(step5({ ts: result.cursor.atClusterTime, aggResult, stream, nextRes, first }), 'remove handled deleted updated', () => stream.close());
1797
1927
  };
1798
1928
  const step5 = (l) => async () => {
1799
- log(`remove handled deleted updated db['${snapshotCollection.collectionName}'].deleteMany({ updated: true, after: null })`);
1929
+ log(streamName, `remove handled deleted updated db['${snapshotCollection.collectionName}'].deleteMany({ updated: true, after: null })`);
1800
1930
  await snapshotCollection.deleteMany({ updated: true, after: null });
1801
1931
  log('removed handled deleted updated');
1802
1932
  return next(step6(l), 'update snapshot aggregation');
1803
1933
  };
1804
1934
  const step6 = (l) => async () => {
1805
1935
  log('update snapshot aggregation', `db['${snapshotCollection.collectionName}'].updateMany({ updated: true }, [ { $set: { updated: false, after: null, before: '$after' } } ])`);
1936
+ const start = Date.now();
1806
1937
  await snapshotCollection.updateMany({ updated: true }, [
1807
1938
  {
1808
1939
  $set: {
@@ -1812,25 +1943,25 @@ const executes$2 = (view, input, streamName, skip = false, after, needs = {}) =>
1812
1943
  },
1813
1944
  },
1814
1945
  ]);
1815
- log('updated snapshot aggregation');
1946
+ log('updated snapshot aggregation', Date.now() - start);
1816
1947
  return next(step7(l), 'update __last');
1817
1948
  };
1818
1949
  const step7 = (l) => async () => {
1819
- await last.updateOne({ _id: streamName }, {
1950
+ const start = Date.now();
1951
+ const patch = {
1820
1952
  $set: {
1821
1953
  ts: l.ts,
1822
- data,
1954
+ job: null,
1823
1955
  },
1824
- }, { upsert: true });
1956
+ };
1957
+ if (l.ts)
1958
+ patch.$set = data;
1959
+ await last.updateOne({ _id: streamName }, patch, { upsert: true });
1960
+ log('updated __last', Date.now() - start, `db['${last.collectionName}'].updateOne({ _id: '${streamName}' }, `, patch, `, { upsert: true })`);
1825
1961
  return step8(l);
1826
1962
  };
1827
1963
  const step8 = (l) => {
1828
- return nextData(l.aggResult.cursor.firstBatch)(() => l.nextRes
1829
- .catch((err) => {
1830
- log('restarting', err);
1831
- return { ts: null };
1832
- })
1833
- .then(doc => doc
1964
+ return nextData(l.aggResult.cursor.firstBatch)(() => l.nextRes.then(doc => doc
1834
1965
  ? next(step3({ _id: streamName, ts: l.ts }), 'restart')
1835
1966
  : step8({ ...l, nextRes: l.stream.tryNext() })), 'wait for change');
1836
1967
  };
@@ -1860,6 +1991,8 @@ const executes$1 = (view, input, streamName, needs) => {
1860
1991
  else if (streamNames[streamName] != hash)
1861
1992
  throw new Error('streamName already used');
1862
1993
  const { collection, projection, hardMatch: pre, match } = view;
1994
+ const client = prepare();
1995
+ const pdb = client.then(cl => cl.db(collection.dbName));
1863
1996
  const removeNotYetSynchronizedFields = projection &&
1864
1997
  Object.values(mapExactToObject(projection, (_, k) => (needs[k] ?? k.startsWith('_')) ? root().of(k).has($exists(true)) : null));
1865
1998
  const hardMatch = removeNotYetSynchronizedFields
@@ -1941,14 +2074,14 @@ const executes$1 = (view, input, streamName, needs) => {
1941
2074
  info: { debug: 'wait for clone into new collection', job: undefined },
1942
2075
  };
1943
2076
  };
1944
- const makeStream = (startAt) => makeWatchStream(db, view, startAt, streamName);
2077
+ const makeStream = () => makeWatchStream(view, streamName);
1945
2078
  const step4 = (lastTS) => async () => {
1946
2079
  const raw = stages(lastTS).with(finalInput.raw(lastTS === null)).stages;
1947
- const aggResult = await aggregate(streamName, c => c({
2080
+ const stream = makeStream();
2081
+ const aggResult = await aggregate(pdb, streamName, c => c({
1948
2082
  coll: collection,
1949
2083
  input: raw,
1950
2084
  }));
1951
- const stream = makeStream(aggResult.cursor.atClusterTime);
1952
2085
  const nextRes = stream.tryNext();
1953
2086
  return next(step7({ aggResult, ts: aggResult.cursor.atClusterTime, stream, nextRes }), 'update __last', () => stream.close());
1954
2087
  };
@@ -1960,12 +2093,7 @@ const executes$1 = (view, input, streamName, needs) => {
1960
2093
  return {
1961
2094
  data: l.aggResult.cursor.firstBatch,
1962
2095
  info: { job: undefined, debug: 'wait for change' },
1963
- cont: withStop(() => l.nextRes
1964
- .catch((err) => {
1965
- log('restarting', err);
1966
- return { ts: null };
1967
- })
1968
- .then(doc => doc
2096
+ cont: withStop(() => l.nextRes.then(doc => doc
1969
2097
  ? next(step4({ _id: streamName, ts: l.ts }), 'restart')
1970
2098
  : step8({ ...l, nextRes: l.stream.tryNext() }))),
1971
2099
  };
@@ -1994,49 +2122,6 @@ const executes = (view, input, needs) => {
1994
2122
  };
1995
2123
  const single = (view, needs = {}) => pipe(input => executes(view, input, needs), emptyDelta(), concatDelta, emptyDelta);
1996
2124
 
1997
- require('dotenv').config();
1998
- const uri = process.env['MONGO_URL'];
1999
-
2000
- const enablePreAndPostImages = (coll) => coll.s.db.command({
2001
- collMod: coll.collectionName,
2002
- changeStreamPreAndPostImages: { enabled: true },
2003
- });
2004
- const prepare = async (testName) => {
2005
- const client = new mongodb.MongoClient(uri, testName ? { monitorCommands: true } : {});
2006
- if (testName) {
2007
- const handler = (c) => {
2008
- promises.writeFile(`./out/${testName}.log`, JSON.stringify(c.command) + ',\n', { flag: 'w' });
2009
- };
2010
- client.on('commandStarted', handler);
2011
- client.on('commandSucceeded', handler);
2012
- }
2013
- await client.connect();
2014
- await client.db('admin').command({
2015
- setClusterParameter: {
2016
- changeStreamOptions: {
2017
- preAndPostImages: { expireAfterSeconds: 60 },
2018
- },
2019
- },
2020
- });
2021
- return client;
2022
- };
2023
- const makeCol = async (docs, database, name) => {
2024
- if (!name) {
2025
- (name = crypto.randomUUID());
2026
- }
2027
- try {
2028
- const col = await database.createCollection(name, {
2029
- changeStreamPreAndPostImages: { enabled: true },
2030
- });
2031
- if (docs.length)
2032
- await col.insertMany([...docs]);
2033
- return col;
2034
- }
2035
- catch {
2036
- return database.collection(name);
2037
- }
2038
- };
2039
-
2040
2125
  exports.$accumulator = $accumulator;
2041
2126
  exports.$and = $and;
2042
2127
  exports.$countDict = $countDict;
@@ -2161,6 +2246,7 @@ exports.range = range;
2161
2246
  exports.regex = regex;
2162
2247
  exports.root = root;
2163
2248
  exports.set = set;
2249
+ exports.setF = setF;
2164
2250
  exports.setField = setField;
2165
2251
  exports.single = single;
2166
2252
  exports.size = size;
package/package.json CHANGED
@@ -3,7 +3,7 @@
3
3
  "module": "index.esm.js",
4
4
  "typings": "index.d.ts",
5
5
  "name": "@omegup/msync",
6
- "version": "0.1.18",
6
+ "version": "0.1.20",
7
7
  "dependencies": {
8
8
  "dayjs": "^1.11.9",
9
9
  "dotenv": "^16.3.1",