mongodb-livedata-server 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. package/README.md +63 -0
  2. package/dist/livedata_server.js +9 -0
  3. package/dist/meteor/binary-heap/max_heap.js +186 -0
  4. package/dist/meteor/binary-heap/min_heap.js +17 -0
  5. package/dist/meteor/binary-heap/min_max_heap.js +48 -0
  6. package/dist/meteor/callback-hook/hook.js +78 -0
  7. package/dist/meteor/ddp/crossbar.js +136 -0
  8. package/dist/meteor/ddp/heartbeat.js +77 -0
  9. package/dist/meteor/ddp/livedata_server.js +403 -0
  10. package/dist/meteor/ddp/method-invocation.js +72 -0
  11. package/dist/meteor/ddp/random-stream.js +100 -0
  12. package/dist/meteor/ddp/session-collection-view.js +106 -0
  13. package/dist/meteor/ddp/session-document-view.js +82 -0
  14. package/dist/meteor/ddp/session.js +570 -0
  15. package/dist/meteor/ddp/stream_server.js +181 -0
  16. package/dist/meteor/ddp/subscription.js +347 -0
  17. package/dist/meteor/ddp/utils.js +104 -0
  18. package/dist/meteor/ddp/writefence.js +111 -0
  19. package/dist/meteor/diff-sequence/diff.js +257 -0
  20. package/dist/meteor/ejson/ejson.js +569 -0
  21. package/dist/meteor/ejson/stringify.js +119 -0
  22. package/dist/meteor/ejson/utils.js +42 -0
  23. package/dist/meteor/id-map/id_map.js +92 -0
  24. package/dist/meteor/mongo/caching_change_observer.js +94 -0
  25. package/dist/meteor/mongo/doc_fetcher.js +53 -0
  26. package/dist/meteor/mongo/geojson_utils.js +41 -0
  27. package/dist/meteor/mongo/live_connection.js +264 -0
  28. package/dist/meteor/mongo/live_cursor.js +57 -0
  29. package/dist/meteor/mongo/minimongo_common.js +2002 -0
  30. package/dist/meteor/mongo/minimongo_matcher.js +217 -0
  31. package/dist/meteor/mongo/minimongo_sorter.js +268 -0
  32. package/dist/meteor/mongo/observe_driver_utils.js +73 -0
  33. package/dist/meteor/mongo/observe_multiplexer.js +228 -0
  34. package/dist/meteor/mongo/oplog-observe-driver.js +919 -0
  35. package/dist/meteor/mongo/oplog_tailing.js +352 -0
  36. package/dist/meteor/mongo/oplog_v2_converter.js +126 -0
  37. package/dist/meteor/mongo/polling_observe_driver.js +195 -0
  38. package/dist/meteor/mongo/synchronous-cursor.js +261 -0
  39. package/dist/meteor/mongo/synchronous-queue.js +110 -0
  40. package/dist/meteor/ordered-dict/ordered_dict.js +198 -0
  41. package/dist/meteor/random/AbstractRandomGenerator.js +92 -0
  42. package/dist/meteor/random/AleaRandomGenerator.js +90 -0
  43. package/dist/meteor/random/NodeRandomGenerator.js +42 -0
  44. package/dist/meteor/random/createAleaGenerator.js +32 -0
  45. package/dist/meteor/random/createRandom.js +22 -0
  46. package/dist/meteor/random/main.js +12 -0
  47. package/livedata_server.ts +3 -0
  48. package/meteor/LICENSE +28 -0
  49. package/meteor/binary-heap/max_heap.ts +225 -0
  50. package/meteor/binary-heap/min_heap.ts +15 -0
  51. package/meteor/binary-heap/min_max_heap.ts +53 -0
  52. package/meteor/callback-hook/hook.ts +85 -0
  53. package/meteor/ddp/crossbar.ts +148 -0
  54. package/meteor/ddp/heartbeat.ts +97 -0
  55. package/meteor/ddp/livedata_server.ts +473 -0
  56. package/meteor/ddp/method-invocation.ts +86 -0
  57. package/meteor/ddp/random-stream.ts +102 -0
  58. package/meteor/ddp/session-collection-view.ts +119 -0
  59. package/meteor/ddp/session-document-view.ts +92 -0
  60. package/meteor/ddp/session.ts +708 -0
  61. package/meteor/ddp/stream_server.ts +204 -0
  62. package/meteor/ddp/subscription.ts +392 -0
  63. package/meteor/ddp/utils.ts +119 -0
  64. package/meteor/ddp/writefence.ts +130 -0
  65. package/meteor/diff-sequence/diff.ts +295 -0
  66. package/meteor/ejson/ejson.ts +601 -0
  67. package/meteor/ejson/stringify.ts +122 -0
  68. package/meteor/ejson/utils.ts +38 -0
  69. package/meteor/id-map/id_map.ts +84 -0
  70. package/meteor/mongo/caching_change_observer.ts +120 -0
  71. package/meteor/mongo/doc_fetcher.ts +52 -0
  72. package/meteor/mongo/geojson_utils.ts +42 -0
  73. package/meteor/mongo/live_connection.ts +302 -0
  74. package/meteor/mongo/live_cursor.ts +79 -0
  75. package/meteor/mongo/minimongo_common.ts +2440 -0
  76. package/meteor/mongo/minimongo_matcher.ts +275 -0
  77. package/meteor/mongo/minimongo_sorter.ts +331 -0
  78. package/meteor/mongo/observe_driver_utils.ts +79 -0
  79. package/meteor/mongo/observe_multiplexer.ts +256 -0
  80. package/meteor/mongo/oplog-observe-driver.ts +1049 -0
  81. package/meteor/mongo/oplog_tailing.ts +414 -0
  82. package/meteor/mongo/oplog_v2_converter.ts +124 -0
  83. package/meteor/mongo/polling_observe_driver.ts +247 -0
  84. package/meteor/mongo/synchronous-cursor.ts +293 -0
  85. package/meteor/mongo/synchronous-queue.ts +119 -0
  86. package/meteor/ordered-dict/ordered_dict.ts +229 -0
  87. package/meteor/random/AbstractRandomGenerator.ts +99 -0
  88. package/meteor/random/AleaRandomGenerator.ts +96 -0
  89. package/meteor/random/NodeRandomGenerator.ts +37 -0
  90. package/meteor/random/createAleaGenerator.ts +31 -0
  91. package/meteor/random/createRandom.ts +19 -0
  92. package/meteor/random/main.ts +8 -0
  93. package/package.json +30 -0
  94. package/tsconfig.json +10 -0
@@ -0,0 +1,919 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.OplogObserveDriver = void 0;
4
+ const observe_driver_utils_1 = require("./observe_driver_utils");
5
+ const oplog_v2_converter_1 = require("./oplog_v2_converter");
6
+ const live_cursor_1 = require("./live_cursor");
7
+ const writefence_1 = require("../ddp/writefence");
8
+ const diff_1 = require("../diff-sequence/diff");
9
+ const min_max_heap_1 = require("../binary-heap/min_max_heap");
10
+ const max_heap_1 = require("../binary-heap/max_heap");
11
+ const minimongo_common_1 = require("./minimongo_common");
12
+ const ejson_1 = require("../ejson/ejson");
13
+ const oplog_tailing_1 = require("./oplog_tailing");
14
+ const id_map_1 = require("../id-map/id_map");
15
+ const synchronous_cursor_1 = require("./synchronous-cursor");
16
// Driver phases: QUERYING (re-running the full query), FETCHING (fetching
// individual modified documents), STEADY (applying oplog entries directly).
var PHASE;
(function (PHASE) {
    PHASE.QUERYING = "QUERYING";
    PHASE.FETCHING = "FETCHING";
    PHASE.STEADY = "STEADY";
})(PHASE || (PHASE = {}));
22
// Exception thrown by _needToPollQuery which unrolls the stack up to the
// enclosing call to finishIfNeedToPollQuery.
var SwitchedToQuery = function () { };

// Wraps `f` so that a SwitchedToQuery thrown anywhere inside it is silently
// swallowed (the driver has switched to a full re-query, so the in-flight
// work is moot); every other exception propagates unchanged.
var finishIfNeedToPollQuery = function (f) {
    return function () {
        try {
            f.apply(this, arguments);
        }
        catch (err) {
            if (!(err instanceof SwitchedToQuery)) {
                throw err;
            }
        }
    };
};

// Monotonically increasing id assigned to each driver instance; used to key
// drivers into write fences.
var currentId = 0;
37
+ // OplogObserveDriver is an alternative to PollingObserveDriver which follows
38
+ // the Mongo operation log instead of just re-polling the query. It obeys the
39
+ // same simple interface: constructing it starts sending observeChanges
40
+ // callbacks (and a ready() invocation) to the ObserveMultiplexer, and you stop
41
+ // it by calling the stop() method.
42
+ class OplogObserveDriver {
43
+ constructor(options) {
44
+ var self = this;
45
+ self._usesOplog = true; // tests look at this
46
+ self._id = currentId;
47
+ currentId++;
48
+ self._cursorDescription = options.cursorDescription;
49
+ self._mongoHandle = options.mongoHandle;
50
+ self._multiplexer = options.multiplexer;
51
+ if (options.ordered) {
52
+ throw Error("OplogObserveDriver only supports unordered observeChanges");
53
+ }
54
+ var sorter = options.sorter;
55
+ // We don't support $near and other geo-queries so it's OK to initialize the
56
+ // comparator only once in the constructor.
57
+ var comparator = sorter && sorter.getComparator();
58
+ if (options.cursorDescription.options.limit) {
59
+ // There are several properties ordered driver implements:
60
+ // - _limit is a positive number
61
+ // - _comparator is a function-comparator by which the query is ordered
62
+ // - _unpublishedBuffer is non-null Min/Max Heap,
63
+ // the empty buffer in STEADY phase implies that the
64
+ // everything that matches the queries selector fits
65
+ // into published set.
66
+ // - _published - Max Heap (also implements IdMap methods)
67
+ var heapOptions = { IdMap: Map };
68
+ self._limit = self._cursorDescription.options.limit;
69
+ self._comparator = comparator;
70
+ self._sorter = sorter;
71
+ self._unpublishedBuffer = new min_max_heap_1.MinMaxHeap(comparator, heapOptions);
72
+ // We need something that can find Max value in addition to IdMap interface
73
+ self._published = new max_heap_1.MaxHeap(comparator, heapOptions);
74
+ }
75
+ else {
76
+ self._limit = 0;
77
+ self._comparator = null;
78
+ self._sorter = null;
79
+ self._unpublishedBuffer = null;
80
+ self._published = new id_map_1.IdMap();
81
+ }
82
+ // Indicates if it is safe to insert a new document at the end of the buffer
83
+ // for this query. i.e. it is known that there are no documents matching the
84
+ // selector those are not in published or buffer.
85
+ self._safeAppendToBuffer = false;
86
+ self._stopped = false;
87
+ self._stopHandles = [];
88
+ self._registerPhaseChange(PHASE.QUERYING);
89
+ self._matcher = options.matcher;
90
+ // we are now using projection, not fields in the cursor description even if you pass {fields}
91
+ // in the cursor construction
92
+ var projection = self._cursorDescription.options.projection || {};
93
+ self._projectionFn = (0, minimongo_common_1._compileProjection)(projection);
94
+ // Projection function, result of combining important fields for selector and
95
+ // existing fields projection
96
+ self._sharedProjection = self._matcher.combineIntoProjection(projection);
97
+ if (sorter)
98
+ self._sharedProjection = sorter.combineIntoProjection(self._sharedProjection);
99
+ self._sharedProjectionFn = (0, minimongo_common_1._compileProjection)(self._sharedProjection);
100
+ self._needToFetch = new Map();
101
+ self._currentlyFetching = null;
102
+ self._fetchGeneration = 0;
103
+ self._requeryWhenDoneThisQuery = false;
104
+ self._writesToCommitWhenWeReachSteady = [];
105
+ // If the oplog handle tells us that it skipped some entries (because it got
106
+ // behind, say), re-poll.
107
+ self._stopHandles.push(self._mongoHandle._oplogHandle.onSkippedEntries(finishIfNeedToPollQuery(function () {
108
+ self._needToPollQuery();
109
+ })));
110
+ (0, observe_driver_utils_1.forEachTrigger)(self._cursorDescription, function (trigger) {
111
+ self._stopHandles.push(/*async*/ self._mongoHandle._oplogHandle.onOplogEntry(trigger, function (notification) {
112
+ //Meteor._noYieldsAllowed(finishIfNeedToPollQuery(function () {
113
+ var op = notification.op;
114
+ if (notification.dropCollection || notification.dropDatabase) {
115
+ // Note: this call is not allowed to block on anything (especially
116
+ // on waiting for oplog entries to catch up) because that will block
117
+ // onOplogEntry!
118
+ self._needToPollQuery();
119
+ }
120
+ else {
121
+ // All other operators should be handled depending on phase
122
+ if (self._phase === PHASE.QUERYING) {
123
+ self._handleOplogEntryQuerying(op);
124
+ }
125
+ else {
126
+ self._handleOplogEntrySteadyOrFetching(op);
127
+ }
128
+ }
129
+ //}));
130
+ }));
131
+ });
132
+ // XXX ordering w.r.t. everything else?
133
+ self._stopHandles.push((0, observe_driver_utils_1.listenAll)(self._cursorDescription, function (notification) {
134
+ // If we're not in a pre-fire write fence, we don't have to do anything.
135
+ var fence = writefence_1._WriteFence._CurrentWriteFence;
136
+ if (!fence || fence.fired)
137
+ return;
138
+ if (fence._oplogObserveDrivers) {
139
+ fence._oplogObserveDrivers[self._id] = self;
140
+ return;
141
+ }
142
+ fence._oplogObserveDrivers = {};
143
+ fence._oplogObserveDrivers[self._id] = self;
144
+ fence.onBeforeFire(async () => {
145
+ var drivers = fence._oplogObserveDrivers;
146
+ delete fence._oplogObserveDrivers;
147
+ // This fence cannot fire until we've caught up to "this point" in the
148
+ // oplog, and all observers made it back to the steady state.
149
+ await self._mongoHandle._oplogHandle.waitUntilCaughtUp();
150
+ for (const driver of Object.values(drivers)) {
151
+ if (driver._stopped)
152
+ return;
153
+ var write = fence.beginWrite();
154
+ if (driver._phase === PHASE.STEADY) {
155
+ // Make sure that all of the callbacks have made it through the
156
+ // multiplexer and been delivered to ObserveHandles before committing
157
+ // writes.
158
+ driver._multiplexer.onFlush(function () {
159
+ write.committed();
160
+ });
161
+ }
162
+ else {
163
+ driver._writesToCommitWhenWeReachSteady.push(write);
164
+ }
165
+ }
166
+ });
167
+ }));
168
+ // When Mongo fails over, we need to repoll the query, in case we processed an
169
+ // oplog entry that got rolled back.
170
+ self._stopHandles.push(self._mongoHandle._onFailover(finishIfNeedToPollQuery(function () {
171
+ self._needToPollQuery();
172
+ })));
173
+ // Give _observeChanges a chance to add the new ObserveHandle to our
174
+ // multiplexer, so that the added calls get streamed.
175
+ setImmediate(finishIfNeedToPollQuery(function () {
176
+ self._runInitialQuery();
177
+ }));
178
+ }
179
+ _addPublished(id, doc) {
180
+ var self = this;
181
+ //Meteor._noYieldsAllowed(function () {
182
+ var fields = Object.assign({}, doc);
183
+ delete fields._id;
184
+ self._published.set(id, self._sharedProjectionFn(doc));
185
+ self._multiplexer.added(id, self._projectionFn(fields));
186
+ // After adding this document, the published set might be overflowed
187
+ // (exceeding capacity specified by limit). If so, push the maximum
188
+ // element to the buffer, we might want to save it in memory to reduce the
189
+ // amount of Mongo lookups in the future.
190
+ if (self._limit && self._published.size() > self._limit) {
191
+ // XXX in theory the size of published is no more than limit+1
192
+ if (self._published.size() !== self._limit + 1) {
193
+ throw new Error("After adding to published, " +
194
+ (self._published.size() - self._limit) +
195
+ " documents are overflowing the set");
196
+ }
197
+ var overflowingDocId = self._published.maxElementId();
198
+ var overflowingDoc = self._published.get(overflowingDocId);
199
+ if ((0, ejson_1.equals)(overflowingDocId, id)) {
200
+ throw new Error("The document just added is overflowing the published set");
201
+ }
202
+ self._published.remove(overflowingDocId);
203
+ self._multiplexer.removed(overflowingDocId);
204
+ self._addBuffered(overflowingDocId, overflowingDoc);
205
+ }
206
+ //});
207
+ }
208
/**
 * Removes `id` from the published set and notifies the multiplexer. For
 * limited queries, back-fills the published set from the buffer so it stays
 * full, or verifies we are in a state where an empty buffer is legitimate.
 * @throws {Error} if the published set exceeds the limit, or if the buffer is
 *   empty in a state where matching documents could exist outside the cache.
 */
_removePublished(id) {
    var self = this;
    self._published.remove(id);
    self._multiplexer.removed(id);
    // Unlimited query, or still exactly at the limit: nothing to back-fill.
    if (!self._limit || self._published.size() === self._limit)
        return;
    if (self._published.size() > self._limit)
        throw Error("self._published got too big");
    // OK, we are publishing less than the limit. Maybe we should look in the
    // buffer to find the next element past what we were publishing before.
    if (!self._unpublishedBuffer.empty()) {
        // There's something in the buffer; move the first thing in it to
        // _published.
        var newDocId = self._unpublishedBuffer.minElementId();
        var newDoc = self._unpublishedBuffer.get(newDocId);
        self._removeBuffered(newDocId);
        self._addPublished(newDocId, newDoc);
        return;
    }
    // There's nothing in the buffer. This could mean one of a few things.
    // (a) We could be in the middle of re-running the query (specifically, we
    // could be in _publishNewResults). In that case, _unpublishedBuffer is
    // empty because we clear it at the beginning of _publishNewResults. In
    // this case, our caller already knows the entire answer to the query and
    // we don't need to do anything fancy here. Just return.
    if (self._phase === PHASE.QUERYING)
        return;
    // (b) We're pretty confident that the union of _published and
    // _unpublishedBuffer contain all documents that match selector. Because
    // _unpublishedBuffer is empty, that means we're confident that _published
    // contains all documents that match selector. So we have nothing to do.
    if (self._safeAppendToBuffer)
        return;
    // (c) Maybe there are other documents out there that should be in our
    // buffer. But in that case, when we emptied _unpublishedBuffer in
    // _removeBuffered, we should have called _needToPollQuery, which will
    // either put something in _unpublishedBuffer or set _safeAppendToBuffer
    // (or both), and it will put us in QUERYING for that whole time. So in
    // fact, we shouldn't be able to get here.
    throw new Error("Buffer inexplicably empty");
}
251
+ _changePublished(id, oldDoc, newDoc) {
252
+ var self = this;
253
+ //Meteor._noYieldsAllowed(function () {
254
+ self._published.set(id, self._sharedProjectionFn(newDoc));
255
+ var projectedNew = self._projectionFn(newDoc);
256
+ var projectedOld = self._projectionFn(oldDoc);
257
+ var changed = diff_1.DiffSequence.makeChangedFields(projectedNew, projectedOld);
258
+ if (Object.keys(changed).length === 0)
259
+ self._multiplexer.changed(id, changed);
260
+ //});
261
+ }
262
+ _addBuffered(id, doc) {
263
+ var self = this;
264
+ //Meteor._noYieldsAllowed(function () {
265
+ self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc));
266
+ // If something is overflowing the buffer, we just remove it from cache
267
+ if (self._unpublishedBuffer.size() > self._limit) {
268
+ var maxBufferedId = self._unpublishedBuffer.maxElementId();
269
+ self._unpublishedBuffer.remove(maxBufferedId);
270
+ // Since something matching is removed from cache (both published set and
271
+ // buffer), set flag to false
272
+ self._safeAppendToBuffer = false;
273
+ }
274
+ //});
275
+ }
276
+ // Is called either to remove the doc completely from matching set or to move
277
+ // it to the published set later.
278
+ _removeBuffered(id) {
279
+ var self = this;
280
+ //Meteor._noYieldsAllowed(function () {
281
+ self._unpublishedBuffer.remove(id);
282
+ // To keep the contract "buffer is never empty in STEADY phase unless the
283
+ // everything matching fits into published" true, we poll everything as
284
+ // soon as we see the buffer becoming empty.
285
+ if (!self._unpublishedBuffer.size() && !self._safeAppendToBuffer)
286
+ self._needToPollQuery();
287
+ //});
288
+ }
289
// Called when a document has joined the "Matching" results set.
// Takes responsibility for keeping _unpublishedBuffer in sync with _published
// and the effect of limit enforced. Decides between publishing the doc,
// buffering it, or dropping it from the cache (clearing _safeAppendToBuffer).
_addMatching(doc) {
    var self = this;
    var id = doc._id;
    // Sanity checks: the doc must not already be cached anywhere.
    if (self._published.has(id))
        throw Error("tried to add something already published " + id);
    if (self._limit && self._unpublishedBuffer.has(id))
        throw Error("tried to add something already existed in buffer " + id);
    var limit = self._limit;
    var comparator = self._comparator;
    var maxPublished = (limit && self._published.size() > 0)
        ? self._published.get(self._published.maxElementId()) // published is MaxHeap because limit is defined
        : null;
    var maxBuffered = (limit && self._unpublishedBuffer.size() > 0)
        ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId())
        : null;
    // The query is unlimited, or didn't publish enough documents yet, or the
    // new document would fit into the published set pushing the maximum element
    // out; then we need to publish the doc.
    var toPublish = !limit || self._published.size() < limit ||
        comparator(doc, maxPublished) < 0;
    // Otherwise we might need to buffer it (only in case of limited query).
    // Buffering is allowed if the buffer is not filled up yet and all
    // matching docs are either in the published set or in the buffer.
    var canAppendToBuffer = !toPublish && self._safeAppendToBuffer &&
        self._unpublishedBuffer.size() < limit;
    // Or if it is small enough to be safely inserted to the middle or the
    // beginning of the buffer.
    var canInsertIntoBuffer = !toPublish && maxBuffered &&
        comparator(doc, maxBuffered) <= 0;
    var toBuffer = canAppendToBuffer || canInsertIntoBuffer;
    if (toPublish) {
        self._addPublished(id, doc);
    }
    else if (toBuffer) {
        self._addBuffered(id, doc);
    }
    else {
        // Dropping it and not saving to the cache: the cache no longer covers
        // all matching docs, so appending to the buffer is no longer safe.
        self._safeAppendToBuffer = false;
    }
}
335
+ // Called when a document leaves the "Matching" results set.
336
+ // Takes responsibility of keeping _unpublishedBuffer in sync with _published
337
+ // and the effect of limit enforced.
338
+ _removeMatching(id) {
339
+ var self = this;
340
+ //Meteor._noYieldsAllowed(function () {
341
+ if (!self._published.has(id) && !self._limit)
342
+ throw Error("tried to remove something matching but not cached " + id);
343
+ if (self._published.has(id)) {
344
+ self._removePublished(id);
345
+ }
346
+ else if (self._unpublishedBuffer.has(id)) {
347
+ self._removeBuffered(id);
348
+ }
349
+ //});
350
+ }
351
/**
 * Reconciles the new state of a single document against the cache (published
 * set + unpublished buffer), emitting added/changed/removed callbacks and
 * moving the doc between published and buffer as limit/order require.
 * @param id - the document id.
 * @param newDoc - the freshly fetched/derived document, or a falsy value when
 *   the document no longer exists.
 */
_handleDoc(id, newDoc) {
    var self = this;
    var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result;
    var publishedBefore = self._published.has(id);
    var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
    var cachedBefore = publishedBefore || bufferedBefore;
    if (matchesNow && !cachedBefore) {
        // Newly matching: decide whether it gets published, buffered or dropped.
        self._addMatching(newDoc);
    }
    else if (cachedBefore && !matchesNow) {
        // No longer matching: remove from wherever it is cached.
        self._removeMatching(id);
    }
    else if (cachedBefore && matchesNow) {
        var oldDoc = self._published.get(id);
        var comparator = self._comparator;
        var minBuffered = self._limit && self._unpublishedBuffer.size() &&
            self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId());
        var maxBuffered;
        if (publishedBefore) {
            // Unlimited case where the document stays in published once it
            // matches, or the case when we don't have enough matching docs to
            // publish, or the changed but matching doc will stay in published
            // anyways.
            //
            // XXX: We rely on the emptiness of buffer. Be sure to maintain the
            // fact that buffer can't be empty if there are matching documents not
            // published. Notably, we don't want to schedule repoll and continue
            // relying on this property.
            var staysInPublished = !self._limit ||
                self._unpublishedBuffer.size() === 0 ||
                comparator(newDoc, minBuffered) <= 0;
            if (staysInPublished) {
                self._changePublished(id, oldDoc, newDoc);
            }
            else {
                // After the change the doc doesn't stay in published; remove it.
                self._removePublished(id);
                // But it can move into buffered now; check it.
                maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
                var toBuffer = self._safeAppendToBuffer ||
                    (maxBuffered && comparator(newDoc, maxBuffered) <= 0);
                if (toBuffer) {
                    self._addBuffered(id, newDoc);
                }
                else {
                    // Thrown away from both published set and buffer; cache no
                    // longer covers all matching docs.
                    self._safeAppendToBuffer = false;
                }
            }
        }
        else if (bufferedBefore) {
            oldDoc = self._unpublishedBuffer.get(id);
            // Remove the old version manually instead of using _removeBuffered so
            // we don't trigger the querying immediately. If we end this block
            // with the buffer empty, we will need to trigger the query poll
            // manually too.
            self._unpublishedBuffer.remove(id);
            // published is MaxHeap because bufferedBefore is only set when limit is defined
            var maxPublished = self._published.get(self._published.maxElementId());
            maxBuffered = self._unpublishedBuffer.size() && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
            // The buffered doc was updated; it could move to published...
            var toPublish = comparator(newDoc, maxPublished) < 0;
            // ...or stay in the buffer even after the change.
            var staysInBuffer = (!toPublish && self._safeAppendToBuffer) ||
                (!toPublish && maxBuffered &&
                    comparator(newDoc, maxBuffered) <= 0);
            if (toPublish) {
                self._addPublished(id, newDoc);
            }
            else if (staysInBuffer) {
                // Stays in buffer but changes.
                self._unpublishedBuffer.set(id, newDoc);
            }
            else {
                // Thrown away from both published set and buffer.
                self._safeAppendToBuffer = false;
                // Normally this check would have been done in _removeBuffered but
                // we didn't use it, so we need to do it ourself now.
                if (!self._unpublishedBuffer.size()) {
                    self._needToPollQuery();
                }
            }
        }
        else {
            throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true.");
        }
    }
}
441
+ _fetchModifiedDocuments() {
442
+ var self = this;
443
+ //Meteor._noYieldsAllowed(function () {
444
+ self._registerPhaseChange(PHASE.FETCHING);
445
+ // Defer, because nothing called from the oplog entry handler may yield,
446
+ // but fetch() yields.
447
+ setImmediate(finishIfNeedToPollQuery(async () => {
448
+ while (!self._stopped && self._needToFetch.size > 0) {
449
+ if (self._phase === PHASE.QUERYING) {
450
+ // While fetching, we decided to go into QUERYING mode, and then we
451
+ // saw another oplog entry, so _needToFetch is not empty. But we
452
+ // shouldn't fetch these documents until AFTER the query is done.
453
+ break;
454
+ }
455
+ // Being in steady phase here would be surprising.
456
+ if (self._phase !== PHASE.FETCHING)
457
+ throw new Error("phase in fetchModifiedDocuments: " + self._phase);
458
+ self._currentlyFetching = self._needToFetch;
459
+ var thisGeneration = ++self._fetchGeneration;
460
+ self._needToFetch = new Map();
461
+ var waiting = 0;
462
+ var fut = { promise: undefined, resolve: undefined };
463
+ fut.promise = new Promise(r => fut.resolve = r);
464
+ // This loop is safe, because _currentlyFetching will not be updated
465
+ // during this loop (in fact, it is never mutated).
466
+ self._currentlyFetching.forEach((op, id) => {
467
+ waiting++;
468
+ self._mongoHandle._docFetcher.fetch(self._cursorDescription.collectionName, id, op, finishIfNeedToPollQuery((err, doc) => {
469
+ try {
470
+ if (err) {
471
+ console.log("Got exception while fetching documents", err);
472
+ // If we get an error from the fetcher (eg, trouble
473
+ // connecting to Mongo), let's just abandon the fetch phase
474
+ // altogether and fall back to polling. It's not like we're
475
+ // getting live updates anyway.
476
+ if (self._phase !== PHASE.QUERYING) {
477
+ self._needToPollQuery();
478
+ }
479
+ }
480
+ else if (!self._stopped && self._phase === PHASE.FETCHING
481
+ && self._fetchGeneration === thisGeneration) {
482
+ // We re-check the generation in case we've had an explicit
483
+ // _pollQuery call (eg, in another fiber) which should
484
+ // effectively cancel this round of fetches. (_pollQuery
485
+ // increments the generation.)
486
+ self._handleDoc(id, doc);
487
+ }
488
+ }
489
+ finally {
490
+ waiting--;
491
+ // Because fetch() never calls its callback synchronously,
492
+ // this is safe (ie, we won't call fut.return() before the
493
+ // forEach is done).
494
+ if (waiting === 0)
495
+ fut.resolve();
496
+ }
497
+ }));
498
+ });
499
+ await fut.promise();
500
+ // Exit now if we've had a _pollQuery call (here or in another fiber).
501
+ if (self._phase === PHASE.QUERYING)
502
+ return;
503
+ self._currentlyFetching = null;
504
+ }
505
+ // We're done fetching, so we can be steady, unless we've had a
506
+ // _pollQuery call (here or in another fiber).
507
+ if (self._phase !== PHASE.QUERYING)
508
+ self._beSteady();
509
+ }));
510
+ //});
511
+ }
512
+ _beSteady() {
513
+ var self = this;
514
+ //Meteor._noYieldsAllowed(function () {
515
+ self._registerPhaseChange(PHASE.STEADY);
516
+ var writes = self._writesToCommitWhenWeReachSteady;
517
+ self._writesToCommitWhenWeReachSteady = [];
518
+ self._multiplexer.onFlush(function () {
519
+ for (const w of writes) {
520
+ w.committed();
521
+ }
522
+ });
523
+ //});
524
+ }
525
+ _handleOplogEntryQuerying(op) {
526
+ var self = this;
527
+ //Meteor._noYieldsAllowed(function () {
528
+ self._needToFetch.set((0, oplog_tailing_1.idForOp)(op), op);
529
+ //});
530
+ }
531
/**
 * Oplog entry handler for the STEADY and FETCHING phases. Applies deletes and
 * inserts directly to the cache; for updates, either applies the modifier
 * in-memory, re-evaluates a full-document replacement, or schedules a
 * re-fetch when the modifier's effect on the selector/sort can't be
 * determined locally.
 * @param op - a raw oplog entry ('d', 'i' or 'u').
 */
_handleOplogEntrySteadyOrFetching(op) {
    var self = this;
    var id = (0, oplog_tailing_1.idForOp)(op);
    // If we're already fetching this one, or about to, we can't optimize;
    // make sure that we fetch it again if necessary.
    if (self._phase === PHASE.FETCHING &&
        ((self._currentlyFetching && self._currentlyFetching.has(id)) ||
            self._needToFetch.has(id))) {
        self._needToFetch.set(id, op);
        return;
    }
    if (op.op === 'd') {
        // Delete: drop the doc from the cache if we have it anywhere.
        if (self._published.has(id) ||
            (self._limit && self._unpublishedBuffer.has(id)))
            self._removeMatching(id);
    }
    else if (op.op === 'i') {
        if (self._published.has(id))
            throw new Error("insert found for already-existing ID in published");
        if (self._unpublishedBuffer && self._unpublishedBuffer.has(id))
            throw new Error("insert found for already-existing ID in buffer");
        // XXX what if selector yields? for now it can't but later it could
        // have $where
        if (self._matcher.documentMatches(op.o).result)
            self._addMatching(op.o);
    }
    else if (op.op === 'u') {
        // We are mapping the new oplog format on mongo 5
        // to what we know better, $set.
        op.o = (0, oplog_v2_converter_1.oplogV2V1Converter)(op.o);
        // Is this a modifier ($set/$unset, which may require us to poll the
        // database to figure out if the whole document matches the selector) or
        // a replacement (in which case we can just directly re-evaluate the
        // selector)?
        // Oplog format has changed on mongodb 5, we have to support both now;
        // `diff` is the format in Mongo 5+ (oplog v2).
        var isReplace = !op.o.hasOwnProperty('$set') && !op.o.hasOwnProperty('diff') && !op.o.hasOwnProperty('$unset');
        // If this modifier modifies something inside an EJSON custom type (ie,
        // anything with EJSON$), then we can't try to use
        // LocalCollection._modify, since that just mutates the EJSON encoding,
        // not the actual object.
        var canDirectlyModifyDoc = !isReplace && modifierCanBeDirectlyApplied(op.o);
        var publishedBefore = self._published.has(id);
        var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
        if (isReplace) {
            self._handleDoc(id, Object.assign({ _id: id }, op.o));
        }
        else if ((publishedBefore || bufferedBefore) &&
            canDirectlyModifyDoc) {
            // Oh great, we actually know what the document is, so we can apply
            // this directly.
            var newDoc = self._published.has(id)
                ? self._published.get(id) : self._unpublishedBuffer.get(id);
            // Clone before modifying: the cached copy must not be mutated.
            newDoc = (0, ejson_1.clone)(newDoc);
            newDoc._id = id;
            try {
                (0, minimongo_common_1._modify)(newDoc, op.o);
            }
            catch (e) {
                if (e.name !== "MinimongoError")
                    throw e;
                // We didn't understand the modifier. Re-fetch.
                self._needToFetch.set(id, op);
                if (self._phase === PHASE.STEADY) {
                    self._fetchModifiedDocuments();
                }
                return;
            }
            self._handleDoc(id, self._sharedProjectionFn(newDoc));
        }
        else if (!canDirectlyModifyDoc ||
            self._matcher.canBecomeTrueByModifier(op.o) ||
            (self._sorter && self._sorter.affectedByModifier(op.o))) {
            // The modifier might change whether the doc matches or how it
            // sorts; we must re-fetch the full document to find out.
            self._needToFetch.set(id, op);
            if (self._phase === PHASE.STEADY)
                self._fetchModifiedDocuments();
        }
    }
    else {
        throw Error("XXX SURPRISING OPERATION: " + op);
    }
}
615
+ // Yields!
616
+ _runInitialQuery() {
617
+ var self = this;
618
+ if (self._stopped)
619
+ throw new Error("oplog stopped surprisingly early");
620
+ self._runQuery({ initial: true }); // yields
621
+ if (self._stopped)
622
+ return; // can happen on queryError
623
+ // Allow observeChanges calls to return. (After this, it's possible for
624
+ // stop() to be called.)
625
+ self._multiplexer.ready();
626
+ self._doneQuerying(); // yields
627
+ }
628
+ // In various circumstances, we may just want to stop processing the oplog and
629
+ // re-run the initial query, just as if we were a PollingObserveDriver.
630
+ //
631
+ // This function may not block, because it is called from an oplog entry
632
+ // handler.
633
+ //
634
+ // XXX We should call this when we detect that we've been in FETCHING for "too
635
+ // long".
636
+ //
637
+ // XXX We should call this when we detect Mongo failover (since that might
638
+ // mean that some of the oplog entries we have processed have been rolled
639
+ // back). The Node Mongo driver is in the middle of a bunch of huge
640
+ // refactorings, including the way that it notifies you when primary
641
+ // changes. Will put off implementing this until driver 1.4 is out.
642
+ _pollQuery() {
643
+ var self = this;
644
+ //Meteor._noYieldsAllowed(function () {
645
+ if (self._stopped)
646
+ return;
647
+ // Yay, we get to forget about all the things we thought we had to fetch.
648
+ self._needToFetch = new Map();
649
+ self._currentlyFetching = null;
650
+ ++self._fetchGeneration; // ignore any in-flight fetches
651
+ self._registerPhaseChange(PHASE.QUERYING);
652
+ // Defer so that we don't yield. We don't need finishIfNeedToPollQuery
653
+ // here because SwitchedToQuery is not thrown in QUERYING mode.
654
+ setImmediate(async () => {
655
+ await self._runQuery();
656
+ await self._doneQuerying();
657
+ });
658
+ //});
659
+ }
660
+ // Yields!
661
+ async _runQuery(options) {
662
+ var self = this;
663
+ options = options || {};
664
+ var newResults, newBuffer;
665
+ // This while loop is just to retry failures.
666
+ while (true) {
667
+ // If we've been stopped, we don't have to run anything any more.
668
+ if (self._stopped)
669
+ return;
670
+ newResults = new id_map_1.IdMap();
671
+ newBuffer = new id_map_1.IdMap();
672
+ // Query 2x documents as the half excluded from the original query will go
673
+ // into unpublished buffer to reduce additional Mongo lookups in cases
674
+ // when documents are removed from the published set and need a
675
+ // replacement.
676
+ // XXX needs more thought on non-zero skip
677
+ // XXX 2 is a "magic number" meaning there is an extra chunk of docs for
678
+ // buffer if such is needed.
679
+ var cursor = self._cursorForQuery({ limit: self._limit * 2 });
680
+ try {
681
+ await cursor.forEach((doc, i) => {
682
+ if (!self._limit || i < self._limit) {
683
+ newResults.set(doc._id, doc);
684
+ }
685
+ else {
686
+ newBuffer.set(doc._id, doc);
687
+ }
688
+ });
689
+ break;
690
+ }
691
+ catch (e) {
692
+ if (options.initial && typeof (e.code) === 'number') {
693
+ // This is an error document sent to us by mongod, not a connection
694
+ // error generated by the client. And we've never seen this query work
695
+ // successfully. Probably it's a bad selector or something, so we
696
+ // should NOT retry. Instead, we should halt the observe (which ends
697
+ // up calling `stop` on us).
698
+ self._multiplexer.queryError(e);
699
+ return;
700
+ }
701
+ // During failover (eg) if we get an exception we should log and retry
702
+ // instead of crashing.
703
+ console.log("Got exception while polling query during failover", e);
704
+ await (0, oplog_tailing_1._sleepForMs)(100);
705
+ }
706
+ }
707
+ if (self._stopped)
708
+ return;
709
+ self._publishNewResults(newResults, newBuffer);
710
+ }
711
+ // Transitions to QUERYING and runs another query, or (if already in QUERYING)
712
+ // ensures that we will query again later.
713
+ //
714
+ // This function may not block, because it is called from an oplog entry
715
+ // handler. However, if we were not already in the QUERYING phase, it throws
716
+ // an exception that is caught by the closest surrounding
717
+ // finishIfNeedToPollQuery call; this ensures that we don't continue running
718
+ // close that was designed for another phase inside PHASE.QUERYING.
719
+ //
720
+ // (It's also necessary whenever logic in this file yields to check that other
721
+ // phases haven't put us into QUERYING mode, though; eg,
722
+ // _fetchModifiedDocuments does this.)
723
+ _needToPollQuery() {
724
+ var self = this;
725
+ //Meteor._noYieldsAllowed(function () {
726
+ if (self._stopped)
727
+ return;
728
+ // If we're not already in the middle of a query, we can query now
729
+ // (possibly pausing FETCHING).
730
+ if (self._phase !== PHASE.QUERYING) {
731
+ self._pollQuery();
732
+ throw new SwitchedToQuery;
733
+ }
734
+ // We're currently in QUERYING. Set a flag to ensure that we run another
735
+ // query when we're done.
736
+ self._requeryWhenDoneThisQuery = true;
737
+ //});
738
+ }
739
+ // Yields!
740
+ async _doneQuerying() {
741
+ var self = this;
742
+ if (self._stopped)
743
+ return;
744
+ await self._mongoHandle._oplogHandle.waitUntilCaughtUp(); // yields
745
+ if (self._stopped)
746
+ return;
747
+ if (self._phase !== PHASE.QUERYING)
748
+ throw Error("Phase unexpectedly " + self._phase);
749
+ //Meteor._noYieldsAllowed(function () {
750
+ if (self._requeryWhenDoneThisQuery) {
751
+ self._requeryWhenDoneThisQuery = false;
752
+ self._pollQuery();
753
+ }
754
+ else if (self._needToFetch.size === 0) {
755
+ self._beSteady();
756
+ }
757
+ else {
758
+ self._fetchModifiedDocuments();
759
+ }
760
+ //});
761
+ }
762
+ _cursorForQuery(optionsOverwrite) {
763
+ //return Meteor._noYieldsAllowed(function () {
764
+ // The query we run is almost the same as the cursor we are observing,
765
+ // with a few changes. We need to read all the fields that are relevant to
766
+ // the selector, not just the fields we are going to publish (that's the
767
+ // "shared" projection). And we don't want to apply any transform in the
768
+ // cursor, because observeChanges shouldn't use the transform.
769
+ var options = Object.assign(Object.assign({}, this._cursorDescription.options), optionsOverwrite);
770
+ options.projection = this._sharedProjection;
771
+ delete options.transform;
772
+ // We are NOT deep cloning fields or selector here, which should be OK.
773
+ const description = new live_cursor_1.CursorDescription(this._cursorDescription.collectionName, this._cursorDescription.selector, options);
774
+ const dbCursor = this._mongoHandle.db.collection(description.collectionName).find(description.selector, options);
775
+ return new synchronous_cursor_1.SynchronousCursor(dbCursor, this._cursorDescription, { useTransform: true });
776
+ //});
777
+ }
778
+ // Replace self._published with newResults (both are IdMaps), invoking observe
779
+ // callbacks on the multiplexer.
780
+ // Replace self._unpublishedBuffer with newBuffer.
781
+ //
782
+ // XXX This is very similar to LocalCollection._diffQueryUnorderedChanges. We
783
+ // should really: (a) Unify IdMap and OrderedDict into Unordered/OrderedDict
784
+ // (b) Rewrite diff.js to use these classes instead of arrays and objects.
785
+ _publishNewResults(newResults, newBuffer) {
786
+ var self = this;
787
+ //Meteor._noYieldsAllowed(function () {
788
+ // If the query is limited and there is a buffer, shut down so it doesn't
789
+ // stay in a way.
790
+ if (self._limit) {
791
+ self._unpublishedBuffer.clear();
792
+ }
793
+ // First remove anything that's gone. Be careful not to modify
794
+ // self._published while iterating over it.
795
+ var idsToRemove = [];
796
+ self._published.forEach(function (doc, id) {
797
+ if (!newResults.has(id))
798
+ idsToRemove.push(id);
799
+ });
800
+ for (const id of idsToRemove) {
801
+ self._removePublished(id);
802
+ }
803
+ // Now do adds and changes.
804
+ // If self has a buffer and limit, the new fetched result will be
805
+ // limited correctly as the query has sort specifier.
806
+ newResults.forEach(function (doc, id) {
807
+ self._handleDoc(id, doc);
808
+ });
809
+ // Sanity-check that everything we tried to put into _published ended up
810
+ // there.
811
+ // XXX if this is slow, remove it later
812
+ if (self._published.size() !== newResults.size()) {
813
+ console.error('The Mongo server and the Meteor query disagree on how ' +
814
+ 'many documents match your query. Cursor description: ', self._cursorDescription);
815
+ throw Error("The Mongo server and the Meteor query disagree on how " +
816
+ "many documents match your query. Maybe it is hitting a Mongo " +
817
+ "edge case? The query is: " +
818
+ (0, ejson_1.stringify)(self._cursorDescription.selector));
819
+ }
820
+ self._published.forEach(function (doc, id) {
821
+ if (!newResults.has(id))
822
+ throw Error("_published has a doc that newResults doesn't; " + id);
823
+ });
824
+ // Finally, replace the buffer
825
+ newBuffer.forEach(function (doc, id) {
826
+ self._addBuffered(id, doc);
827
+ });
828
+ self._safeAppendToBuffer = newBuffer.size() < self._limit;
829
+ //});
830
+ }
831
+ // This stop function is invoked from the onStop of the ObserveMultiplexer, so
832
+ // it shouldn't actually be possible to call it until the multiplexer is
833
+ // ready.
834
+ //
835
+ // It's important to check self._stopped after every call in this file that
836
+ // can yield!
837
+ stop() {
838
+ var self = this;
839
+ if (self._stopped)
840
+ return;
841
+ self._stopped = true;
842
+ for (const handle of self._stopHandles) {
843
+ handle.stop();
844
+ }
845
+ // Note: we *don't* use multiplexer.onFlush here because this stop
846
+ // callback is actually invoked by the multiplexer itself when it has
847
+ // determined that there are no handles left. So nothing is actually going
848
+ // to get flushed (and it's probably not valid to call methods on the
849
+ // dying multiplexer).
850
+ for (const w of self._writesToCommitWhenWeReachSteady) {
851
+ w.committed(); // maybe yields?
852
+ }
853
+ self._writesToCommitWhenWeReachSteady = null;
854
+ // Proactively drop references to potentially big things.
855
+ self._published = null;
856
+ self._unpublishedBuffer = null;
857
+ self._needToFetch = null;
858
+ self._currentlyFetching = null;
859
+ // not used?
860
+ //self._oplogEntryHandle = null;
861
+ //self._listenersHandle = null;
862
+ }
863
+ _registerPhaseChange(phase) {
864
+ var self = this;
865
+ //Meteor._noYieldsAllowed(function () {
866
+ var now = new Date;
867
+ self._phase = phase;
868
+ self._phaseStartTime = now;
869
+ //});
870
+ }
871
+ // Does our oplog tailing code support this cursor? For now, we are being very
872
+ // conservative and allowing only simple queries with simple options.
873
+ // (This is a "static method".)
874
+ static cursorSupported(cursorDescription, matcher) {
875
+ // First, check the options.
876
+ var options = cursorDescription.options;
877
+ // Did the user say no explicitly?
878
+ if (options.disableOplog)
879
+ return false;
880
+ // skip is not supported: to support it we would need to keep track of all
881
+ // "skipped" documents or at least their ids.
882
+ // limit w/o a sort specifier is not supported: current implementation needs a
883
+ // deterministic way to order documents.
884
+ if (options.skip || (options.limit && !options.sort))
885
+ return false;
886
+ // If a fields projection option is given check if it is supported by
887
+ // minimongo (some operators are not supported).
888
+ if (options.projection) {
889
+ try {
890
+ (0, minimongo_common_1._checkSupportedProjection)(options.projection);
891
+ }
892
+ catch (e) {
893
+ if (e.name === "MinimongoError") {
894
+ return false;
895
+ }
896
+ else {
897
+ throw e;
898
+ }
899
+ }
900
+ }
901
+ // We don't allow the following selectors:
902
+ // - $where (not confident that we provide the same JS environment
903
+ // as Mongo, and can yield!)
904
+ // - $near (has "interesting" properties in MongoDB, like the possibility
905
+ // of returning an ID multiple times, though even polling maybe
906
+ // have a bug there)
907
+ // XXX: once we support it, we would need to think more on how we
908
+ // initialize the comparators when we create the driver.
909
+ return !matcher.hasWhere() && !matcher.hasGeoQuery();
910
+ }
911
+ }
912
+ exports.OplogObserveDriver = OplogObserveDriver;
913
// A modifier can be applied locally (via LocalCollection._modify) only when
// none of the field names it touches contain "EJSON$": such fields belong to
// an EJSON custom-type encoding, and mutating the encoding directly would not
// update the actual decoded object.
// @param {Object} modifier - a Mongo modifier, e.g. { $set: { a: 1 } }
// @returns {boolean} true if every touched field name is EJSON$-free
var modifierCanBeDirectlyApplied = function (modifier) {
    // Iterate operator payloads directly; the operator names and the field
    // values are irrelevant — only the field names matter.
    return Object.values(modifier).every((fields) => {
        return Object.keys(fields).every((field) => !/EJSON\$/.test(field));
    });
};