mongodb-livedata-server 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/README.md +2 -2
  2. package/dist/livedata_server.d.ts +4 -4
  3. package/dist/livedata_server.js +11 -11
  4. package/dist/meteor/binary-heap/max_heap.d.ts +31 -31
  5. package/dist/meteor/binary-heap/max_heap.js +186 -186
  6. package/dist/meteor/binary-heap/min_heap.d.ts +6 -6
  7. package/dist/meteor/binary-heap/min_heap.js +17 -17
  8. package/dist/meteor/binary-heap/min_max_heap.d.ts +11 -11
  9. package/dist/meteor/binary-heap/min_max_heap.js +48 -48
  10. package/dist/meteor/callback-hook/hook.d.ts +11 -11
  11. package/dist/meteor/callback-hook/hook.js +78 -78
  12. package/dist/meteor/ddp/crossbar.d.ts +15 -15
  13. package/dist/meteor/ddp/crossbar.js +136 -136
  14. package/dist/meteor/ddp/heartbeat.d.ts +19 -19
  15. package/dist/meteor/ddp/heartbeat.js +77 -77
  16. package/dist/meteor/ddp/livedata_server.d.ts +141 -142
  17. package/dist/meteor/ddp/livedata_server.js +403 -403
  18. package/dist/meteor/ddp/method-invocation.d.ts +35 -35
  19. package/dist/meteor/ddp/method-invocation.js +72 -72
  20. package/dist/meteor/ddp/random-stream.d.ts +8 -8
  21. package/dist/meteor/ddp/random-stream.js +100 -100
  22. package/dist/meteor/ddp/session-collection-view.d.ts +20 -20
  23. package/dist/meteor/ddp/session-collection-view.js +106 -106
  24. package/dist/meteor/ddp/session-document-view.d.ts +8 -8
  25. package/dist/meteor/ddp/session-document-view.js +82 -82
  26. package/dist/meteor/ddp/session.d.ts +75 -75
  27. package/dist/meteor/ddp/session.js +590 -590
  28. package/dist/meteor/ddp/stream_server.d.ts +20 -21
  29. package/dist/meteor/ddp/stream_server.js +181 -181
  30. package/dist/meteor/ddp/subscription.d.ts +94 -94
  31. package/dist/meteor/ddp/subscription.js +370 -370
  32. package/dist/meteor/ddp/utils.d.ts +8 -8
  33. package/dist/meteor/ddp/utils.js +104 -104
  34. package/dist/meteor/ddp/writefence.d.ts +20 -20
  35. package/dist/meteor/ddp/writefence.js +111 -111
  36. package/dist/meteor/diff-sequence/diff.d.ts +17 -17
  37. package/dist/meteor/diff-sequence/diff.js +257 -257
  38. package/dist/meteor/ejson/ejson.d.ts +82 -82
  39. package/dist/meteor/ejson/ejson.js +568 -569
  40. package/dist/meteor/ejson/stringify.d.ts +2 -2
  41. package/dist/meteor/ejson/stringify.js +119 -119
  42. package/dist/meteor/ejson/utils.d.ts +12 -12
  43. package/dist/meteor/ejson/utils.js +42 -42
  44. package/dist/meteor/mongo/caching_change_observer.d.ts +16 -16
  45. package/dist/meteor/mongo/caching_change_observer.js +63 -63
  46. package/dist/meteor/mongo/doc_fetcher.d.ts +7 -7
  47. package/dist/meteor/mongo/doc_fetcher.js +53 -53
  48. package/dist/meteor/mongo/geojson_utils.d.ts +3 -3
  49. package/dist/meteor/mongo/geojson_utils.js +40 -41
  50. package/dist/meteor/mongo/live_connection.d.ts +28 -28
  51. package/dist/meteor/mongo/live_connection.js +264 -264
  52. package/dist/meteor/mongo/live_cursor.d.ts +25 -25
  53. package/dist/meteor/mongo/live_cursor.js +60 -60
  54. package/dist/meteor/mongo/minimongo_common.d.ts +84 -84
  55. package/dist/meteor/mongo/minimongo_common.js +1998 -1998
  56. package/dist/meteor/mongo/minimongo_matcher.d.ts +23 -23
  57. package/dist/meteor/mongo/minimongo_matcher.js +283 -283
  58. package/dist/meteor/mongo/minimongo_sorter.d.ts +16 -16
  59. package/dist/meteor/mongo/minimongo_sorter.js +268 -268
  60. package/dist/meteor/mongo/observe_driver_utils.d.ts +9 -9
  61. package/dist/meteor/mongo/observe_driver_utils.js +72 -73
  62. package/dist/meteor/mongo/observe_multiplexer.d.ts +46 -46
  63. package/dist/meteor/mongo/observe_multiplexer.js +203 -203
  64. package/dist/meteor/mongo/oplog-observe-driver.d.ts +68 -68
  65. package/dist/meteor/mongo/oplog-observe-driver.js +918 -918
  66. package/dist/meteor/mongo/oplog_tailing.d.ts +35 -35
  67. package/dist/meteor/mongo/oplog_tailing.js +352 -352
  68. package/dist/meteor/mongo/oplog_v2_converter.d.ts +1 -1
  69. package/dist/meteor/mongo/oplog_v2_converter.js +125 -126
  70. package/dist/meteor/mongo/polling_observe_driver.d.ts +30 -30
  71. package/dist/meteor/mongo/polling_observe_driver.js +216 -221
  72. package/dist/meteor/mongo/synchronous-cursor.d.ts +17 -17
  73. package/dist/meteor/mongo/synchronous-cursor.js +261 -261
  74. package/dist/meteor/mongo/synchronous-queue.d.ts +13 -13
  75. package/dist/meteor/mongo/synchronous-queue.js +110 -110
  76. package/dist/meteor/ordered-dict/ordered_dict.d.ts +31 -31
  77. package/dist/meteor/ordered-dict/ordered_dict.js +198 -198
  78. package/dist/meteor/random/AbstractRandomGenerator.d.ts +42 -42
  79. package/dist/meteor/random/AbstractRandomGenerator.js +92 -92
  80. package/dist/meteor/random/AleaRandomGenerator.d.ts +13 -13
  81. package/dist/meteor/random/AleaRandomGenerator.js +90 -90
  82. package/dist/meteor/random/NodeRandomGenerator.d.ts +16 -16
  83. package/dist/meteor/random/NodeRandomGenerator.js +42 -42
  84. package/dist/meteor/random/createAleaGenerator.d.ts +2 -2
  85. package/dist/meteor/random/createAleaGenerator.js +32 -32
  86. package/dist/meteor/random/createRandom.d.ts +1 -1
  87. package/dist/meteor/random/createRandom.js +22 -22
  88. package/dist/meteor/random/main.d.ts +1 -1
  89. package/dist/meteor/random/main.js +12 -12
  90. package/dist/meteor/types.d.ts +1 -1
  91. package/dist/meteor/types.js +2 -2
  92. package/package.json +5 -5
@@ -1,918 +1,918 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.OplogObserveDriver = void 0;
4
- const observe_driver_utils_1 = require("./observe_driver_utils");
5
- const oplog_v2_converter_1 = require("./oplog_v2_converter");
6
- const live_cursor_1 = require("./live_cursor");
7
- const writefence_1 = require("../ddp/writefence");
8
- const diff_1 = require("../diff-sequence/diff");
9
- const min_max_heap_1 = require("../binary-heap/min_max_heap");
10
- const max_heap_1 = require("../binary-heap/max_heap");
11
- const minimongo_common_1 = require("./minimongo_common");
12
- const ejson_1 = require("../ejson/ejson");
13
- const oplog_tailing_1 = require("./oplog_tailing");
14
- const synchronous_cursor_1 = require("./synchronous-cursor");
15
- var PHASE;
16
- (function (PHASE) {
17
- PHASE["QUERYING"] = "QUERYING";
18
- PHASE["FETCHING"] = "FETCHING";
19
- PHASE["STEADY"] = "STEADY";
20
- })(PHASE || (PHASE = {}));
21
- // Exception thrown by _needToPollQuery which unrolls the stack up to the
22
- // enclosing call to finishIfNeedToPollQuery.
23
- var SwitchedToQuery = function () { };
24
- var finishIfNeedToPollQuery = function (f) {
25
- return function () {
26
- try {
27
- f.apply(this, arguments);
28
- }
29
- catch (e) {
30
- if (!(e instanceof SwitchedToQuery))
31
- throw e;
32
- }
33
- };
34
- };
35
- var currentId = 0;
36
- // OplogObserveDriver is an alternative to PollingObserveDriver which follows
37
- // the Mongo operation log instead of just re-polling the query. It obeys the
38
- // same simple interface: constructing it starts sending observeChanges
39
- // callbacks (and a ready() invocation) to the ObserveMultiplexer, and you stop
40
- // it by calling the stop() method.
41
- class OplogObserveDriver {
42
- constructor(options) {
43
- var self = this;
44
- self._usesOplog = true; // tests look at this
45
- self._id = currentId;
46
- currentId++;
47
- self._cursorDescription = options.cursorDescription;
48
- self._mongoHandle = options.mongoHandle;
49
- self._multiplexer = options.multiplexer;
50
- if (options.ordered) {
51
- throw Error("OplogObserveDriver only supports unordered observeChanges");
52
- }
53
- var sorter = options.sorter;
54
- // We don't support $near and other geo-queries so it's OK to initialize the
55
- // comparator only once in the constructor.
56
- var comparator = sorter && sorter.getComparator();
57
- if (options.cursorDescription.options.limit) {
58
- // There are several properties ordered driver implements:
59
- // - _limit is a positive number
60
- // - _comparator is a function-comparator by which the query is ordered
61
- // - _unpublishedBuffer is non-null Min/Max Heap,
62
- // the empty buffer in STEADY phase implies that the
63
- // everything that matches the queries selector fits
64
- // into published set.
65
- // - _published - Max Heap (also implements IdMap methods)
66
- var heapOptions = { IdMap: Map };
67
- self._limit = self._cursorDescription.options.limit;
68
- self._comparator = comparator;
69
- self._sorter = sorter;
70
- self._unpublishedBuffer = new min_max_heap_1.MinMaxHeap(comparator, heapOptions);
71
- // We need something that can find Max value in addition to IdMap interface
72
- self._published = new max_heap_1.MaxHeap(comparator, heapOptions);
73
- }
74
- else {
75
- self._limit = 0;
76
- self._comparator = null;
77
- self._sorter = null;
78
- self._unpublishedBuffer = null;
79
- self._published = new Map();
80
- }
81
- // Indicates if it is safe to insert a new document at the end of the buffer
82
- // for this query. i.e. it is known that there are no documents matching the
83
- // selector those are not in published or buffer.
84
- self._safeAppendToBuffer = false;
85
- self._stopped = false;
86
- self._stopHandles = [];
87
- self._registerPhaseChange(PHASE.QUERYING);
88
- self._matcher = options.matcher;
89
- // we are now using projection, not fields in the cursor description even if you pass {fields}
90
- // in the cursor construction
91
- var projection = self._cursorDescription.options.projection || {};
92
- self._projectionFn = (0, minimongo_common_1._compileProjection)(projection);
93
- // Projection function, result of combining important fields for selector and
94
- // existing fields projection
95
- self._sharedProjection = self._matcher.combineIntoProjection(projection);
96
- if (sorter)
97
- self._sharedProjection = sorter.combineIntoProjection(self._sharedProjection);
98
- self._sharedProjectionFn = (0, minimongo_common_1._compileProjection)(self._sharedProjection);
99
- self._needToFetch = new Map();
100
- self._currentlyFetching = null;
101
- self._fetchGeneration = 0;
102
- self._requeryWhenDoneThisQuery = false;
103
- self._writesToCommitWhenWeReachSteady = [];
104
- // If the oplog handle tells us that it skipped some entries (because it got
105
- // behind, say), re-poll.
106
- self._stopHandles.push(self._mongoHandle._oplogHandle.onSkippedEntries(finishIfNeedToPollQuery(function () {
107
- self._needToPollQuery();
108
- })));
109
- (0, observe_driver_utils_1.forEachTrigger)(self._cursorDescription, async (trigger) => {
110
- self._stopHandles.push(await self._mongoHandle._oplogHandle.onOplogEntry(trigger, function (notification) {
111
- //Meteor._noYieldsAllowed(finishIfNeedToPollQuery(function () {
112
- var op = notification.op;
113
- if (notification.dropCollection || notification.dropDatabase) {
114
- // Note: this call is not allowed to block on anything (especially
115
- // on waiting for oplog entries to catch up) because that will block
116
- // onOplogEntry!
117
- self._needToPollQuery();
118
- }
119
- else {
120
- // All other operators should be handled depending on phase
121
- if (self._phase === PHASE.QUERYING) {
122
- self._handleOplogEntryQuerying(op);
123
- }
124
- else {
125
- self._handleOplogEntrySteadyOrFetching(op);
126
- }
127
- }
128
- //}));
129
- }));
130
- });
131
- // XXX ordering w.r.t. everything else?
132
- self._stopHandles.push((0, observe_driver_utils_1.listenAll)(self._cursorDescription, function (notification) {
133
- // If we're not in a pre-fire write fence, we don't have to do anything.
134
- var fence = writefence_1._WriteFence._CurrentWriteFence;
135
- if (!fence || fence.fired)
136
- return;
137
- if (fence._oplogObserveDrivers) {
138
- fence._oplogObserveDrivers[self._id] = self;
139
- return;
140
- }
141
- fence._oplogObserveDrivers = {};
142
- fence._oplogObserveDrivers[self._id] = self;
143
- fence.onBeforeFire(async () => {
144
- var drivers = fence._oplogObserveDrivers;
145
- delete fence._oplogObserveDrivers;
146
- // This fence cannot fire until we've caught up to "this point" in the
147
- // oplog, and all observers made it back to the steady state.
148
- await self._mongoHandle._oplogHandle.waitUntilCaughtUp();
149
- for (const driver of Object.values(drivers)) {
150
- if (driver._stopped)
151
- return;
152
- var write = fence.beginWrite();
153
- if (driver._phase === PHASE.STEADY) {
154
- // Make sure that all of the callbacks have made it through the
155
- // multiplexer and been delivered to ObserveHandles before committing
156
- // writes.
157
- driver._multiplexer.onFlush(function () {
158
- write.committed();
159
- });
160
- }
161
- else {
162
- driver._writesToCommitWhenWeReachSteady.push(write);
163
- }
164
- }
165
- });
166
- }));
167
- // When Mongo fails over, we need to repoll the query, in case we processed an
168
- // oplog entry that got rolled back.
169
- self._stopHandles.push(self._mongoHandle._onFailover(finishIfNeedToPollQuery(function () {
170
- self._needToPollQuery();
171
- })));
172
- // Give _observeChanges a chance to add the new ObserveHandle to our
173
- // multiplexer, so that the added calls get streamed.
174
- setImmediate(finishIfNeedToPollQuery(function () {
175
- self._runInitialQuery();
176
- }));
177
- }
178
- _addPublished(id, doc) {
179
- var self = this;
180
- //Meteor._noYieldsAllowed(function () {
181
- var fields = Object.assign({}, doc);
182
- delete fields._id;
183
- self._published.set(id, self._sharedProjectionFn(doc));
184
- self._multiplexer.added(id, self._projectionFn(fields));
185
- // After adding this document, the published set might be overflowed
186
- // (exceeding capacity specified by limit). If so, push the maximum
187
- // element to the buffer, we might want to save it in memory to reduce the
188
- // amount of Mongo lookups in the future.
189
- if (self._limit && self._published.size > self._limit) {
190
- // XXX in theory the size of published is no more than limit+1
191
- if (self._published.size !== self._limit + 1) {
192
- throw new Error("After adding to published, " +
193
- (self._published.size - self._limit) +
194
- " documents are overflowing the set");
195
- }
196
- var overflowingDocId = self._published.maxElementId();
197
- var overflowingDoc = self._published.get(overflowingDocId);
198
- if ((0, ejson_1.equals)(overflowingDocId, id)) {
199
- throw new Error("The document just added is overflowing the published set");
200
- }
201
- self._published.delete(overflowingDocId);
202
- self._multiplexer.removed(overflowingDocId);
203
- self._addBuffered(overflowingDocId, overflowingDoc);
204
- }
205
- //});
206
- }
207
- _removePublished(id) {
208
- var self = this;
209
- //Meteor._noYieldsAllowed(function () {
210
- self._published.delete(id);
211
- self._multiplexer.removed(id);
212
- if (!self._limit || self._published.size === self._limit)
213
- return;
214
- if (self._published.size > self._limit)
215
- throw Error("self._published got too big");
216
- // OK, we are publishing less than the limit. Maybe we should look in the
217
- // buffer to find the next element past what we were publishing before.
218
- if (!self._unpublishedBuffer.empty()) {
219
- // There's something in the buffer; move the first thing in it to
220
- // _published.
221
- var newDocId = self._unpublishedBuffer.minElementId();
222
- var newDoc = self._unpublishedBuffer.get(newDocId);
223
- self._removeBuffered(newDocId);
224
- self._addPublished(newDocId, newDoc);
225
- return;
226
- }
227
- // There's nothing in the buffer. This could mean one of a few things.
228
- // (a) We could be in the middle of re-running the query (specifically, we
229
- // could be in _publishNewResults). In that case, _unpublishedBuffer is
230
- // empty because we clear it at the beginning of _publishNewResults. In
231
- // this case, our caller already knows the entire answer to the query and
232
- // we don't need to do anything fancy here. Just return.
233
- if (self._phase === PHASE.QUERYING)
234
- return;
235
- // (b) We're pretty confident that the union of _published and
236
- // _unpublishedBuffer contain all documents that match selector. Because
237
- // _unpublishedBuffer is empty, that means we're confident that _published
238
- // contains all documents that match selector. So we have nothing to do.
239
- if (self._safeAppendToBuffer)
240
- return;
241
- // (c) Maybe there are other documents out there that should be in our
242
- // buffer. But in that case, when we emptied _unpublishedBuffer in
243
- // _removeBuffered, we should have called _needToPollQuery, which will
244
- // either put something in _unpublishedBuffer or set _safeAppendToBuffer
245
- // (or both), and it will put us in QUERYING for that whole time. So in
246
- // fact, we shouldn't be able to get here.
247
- throw new Error("Buffer inexplicably empty");
248
- //});
249
- }
250
- _changePublished(id, oldDoc, newDoc) {
251
- var self = this;
252
- //Meteor._noYieldsAllowed(function () {
253
- self._published.set(id, self._sharedProjectionFn(newDoc));
254
- var projectedNew = self._projectionFn(newDoc);
255
- var projectedOld = self._projectionFn(oldDoc);
256
- var changed = diff_1.DiffSequence.makeChangedFields(projectedNew, projectedOld);
257
- if (Object.keys(changed).length !== 0)
258
- self._multiplexer.changed(id, changed);
259
- //});
260
- }
261
- _addBuffered(id, doc) {
262
- var self = this;
263
- //Meteor._noYieldsAllowed(function () {
264
- self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc));
265
- // If something is overflowing the buffer, we just remove it from cache
266
- if (self._unpublishedBuffer.size > self._limit) {
267
- var maxBufferedId = self._unpublishedBuffer.maxElementId();
268
- self._unpublishedBuffer.delete(maxBufferedId);
269
- // Since something matching is removed from cache (both published set and
270
- // buffer), set flag to false
271
- self._safeAppendToBuffer = false;
272
- }
273
- //});
274
- }
275
- // Is called either to remove the doc completely from matching set or to move
276
- // it to the published set later.
277
- _removeBuffered(id) {
278
- var self = this;
279
- //Meteor._noYieldsAllowed(function () {
280
- self._unpublishedBuffer.delete(id);
281
- // To keep the contract "buffer is never empty in STEADY phase unless the
282
- // everything matching fits into published" true, we poll everything as
283
- // soon as we see the buffer becoming empty.
284
- if (!self._unpublishedBuffer.size && !self._safeAppendToBuffer)
285
- self._needToPollQuery();
286
- //});
287
- }
288
- // Called when a document has joined the "Matching" results set.
289
- // Takes responsibility of keeping _unpublishedBuffer in sync with _published
290
- // and the effect of limit enforced.
291
- _addMatching(doc) {
292
- var self = this;
293
- //Meteor._noYieldsAllowed(function () {
294
- var id = doc._id;
295
- if (self._published.has(id))
296
- throw Error("tried to add something already published " + id);
297
- if (self._limit && self._unpublishedBuffer.has(id))
298
- throw Error("tried to add something already existed in buffer " + id);
299
- var limit = self._limit;
300
- var comparator = self._comparator;
301
- var maxPublished = (limit && self._published.size > 0)
302
- ? self._published.get(self._published.maxElementId()) // published is MaxHeap because limit is defined
303
- : null;
304
- var maxBuffered = (limit && self._unpublishedBuffer.size > 0)
305
- ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId())
306
- : null;
307
- // The query is unlimited or didn't publish enough documents yet or the
308
- // new document would fit into published set pushing the maximum element
309
- // out, then we need to publish the doc.
310
- var toPublish = !limit || self._published.size < limit ||
311
- comparator(doc, maxPublished) < 0;
312
- // Otherwise we might need to buffer it (only in case of limited query).
313
- // Buffering is allowed if the buffer is not filled up yet and all
314
- // matching docs are either in the published set or in the buffer.
315
- var canAppendToBuffer = !toPublish && self._safeAppendToBuffer &&
316
- self._unpublishedBuffer.size < limit;
317
- // Or if it is small enough to be safely inserted to the middle or the
318
- // beginning of the buffer.
319
- var canInsertIntoBuffer = !toPublish && maxBuffered &&
320
- comparator(doc, maxBuffered) <= 0;
321
- var toBuffer = canAppendToBuffer || canInsertIntoBuffer;
322
- if (toPublish) {
323
- self._addPublished(id, doc);
324
- }
325
- else if (toBuffer) {
326
- self._addBuffered(id, doc);
327
- }
328
- else {
329
- // dropping it and not saving to the cache
330
- self._safeAppendToBuffer = false;
331
- }
332
- //});
333
- }
334
- // Called when a document leaves the "Matching" results set.
335
- // Takes responsibility of keeping _unpublishedBuffer in sync with _published
336
- // and the effect of limit enforced.
337
- _removeMatching(id) {
338
- var self = this;
339
- //Meteor._noYieldsAllowed(function () {
340
- if (!self._published.has(id) && !self._limit)
341
- throw Error("tried to remove something matching but not cached " + id);
342
- if (self._published.has(id)) {
343
- self._removePublished(id);
344
- }
345
- else if (self._unpublishedBuffer.has(id)) {
346
- self._removeBuffered(id);
347
- }
348
- //});
349
- }
350
- _handleDoc(id, newDoc) {
351
- var self = this;
352
- //Meteor._noYieldsAllowed(function () {
353
- var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result;
354
- var publishedBefore = self._published.has(id);
355
- var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
356
- var cachedBefore = publishedBefore || bufferedBefore;
357
- if (matchesNow && !cachedBefore) {
358
- self._addMatching(newDoc);
359
- }
360
- else if (cachedBefore && !matchesNow) {
361
- self._removeMatching(id);
362
- }
363
- else if (cachedBefore && matchesNow) {
364
- var oldDoc = self._published.get(id);
365
- var comparator = self._comparator;
366
- var minBuffered = self._limit && self._unpublishedBuffer.size &&
367
- self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId());
368
- var maxBuffered;
369
- if (publishedBefore) {
370
- // Unlimited case where the document stays in published once it
371
- // matches or the case when we don't have enough matching docs to
372
- // publish or the changed but matching doc will stay in published
373
- // anyways.
374
- //
375
- // XXX: We rely on the emptiness of buffer. Be sure to maintain the
376
- // fact that buffer can't be empty if there are matching documents not
377
- // published. Notably, we don't want to schedule repoll and continue
378
- // relying on this property.
379
- var staysInPublished = !self._limit ||
380
- self._unpublishedBuffer.size === 0 ||
381
- comparator(newDoc, minBuffered) <= 0;
382
- if (staysInPublished) {
383
- self._changePublished(id, oldDoc, newDoc);
384
- }
385
- else {
386
- // after the change doc doesn't stay in the published, remove it
387
- self._removePublished(id);
388
- // but it can move into buffered now, check it
389
- maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
390
- var toBuffer = self._safeAppendToBuffer ||
391
- (maxBuffered && comparator(newDoc, maxBuffered) <= 0);
392
- if (toBuffer) {
393
- self._addBuffered(id, newDoc);
394
- }
395
- else {
396
- // Throw away from both published set and buffer
397
- self._safeAppendToBuffer = false;
398
- }
399
- }
400
- }
401
- else if (bufferedBefore) {
402
- oldDoc = self._unpublishedBuffer.get(id);
403
- // remove the old version manually instead of using _removeBuffered so
404
- // we don't trigger the querying immediately. if we end this block
405
- // with the buffer empty, we will need to trigger the query poll
406
- // manually too.
407
- self._unpublishedBuffer.delete(id);
408
- // published is MaxHeap because bufferedBefore is only set when limit is defined
409
- var maxPublished = self._published.get(self._published.maxElementId());
410
- maxBuffered = self._unpublishedBuffer.size && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
411
- // the buffered doc was updated, it could move to published
412
- var toPublish = comparator(newDoc, maxPublished) < 0;
413
- // or stays in buffer even after the change
414
- var staysInBuffer = (!toPublish && self._safeAppendToBuffer) ||
415
- (!toPublish && maxBuffered &&
416
- comparator(newDoc, maxBuffered) <= 0);
417
- if (toPublish) {
418
- self._addPublished(id, newDoc);
419
- }
420
- else if (staysInBuffer) {
421
- // stays in buffer but changes
422
- self._unpublishedBuffer.set(id, newDoc);
423
- }
424
- else {
425
- // Throw away from both published set and buffer
426
- self._safeAppendToBuffer = false;
427
- // Normally this check would have been done in _removeBuffered but
428
- // we didn't use it, so we need to do it ourself now.
429
- if (!self._unpublishedBuffer.size) {
430
- self._needToPollQuery();
431
- }
432
- }
433
- }
434
- else {
435
- throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true.");
436
- }
437
- }
438
- //});
439
- }
440
- _fetchModifiedDocuments() {
441
- var self = this;
442
- //Meteor._noYieldsAllowed(function () {
443
- self._registerPhaseChange(PHASE.FETCHING);
444
- // Defer, because nothing called from the oplog entry handler may yield,
445
- // but fetch() yields.
446
- setImmediate(finishIfNeedToPollQuery(async () => {
447
- while (!self._stopped && self._needToFetch.size > 0) {
448
- if (self._phase === PHASE.QUERYING) {
449
- // While fetching, we decided to go into QUERYING mode, and then we
450
- // saw another oplog entry, so _needToFetch is not empty. But we
451
- // shouldn't fetch these documents until AFTER the query is done.
452
- break;
453
- }
454
- // Being in steady phase here would be surprising.
455
- if (self._phase !== PHASE.FETCHING)
456
- throw new Error("phase in fetchModifiedDocuments: " + self._phase);
457
- self._currentlyFetching = self._needToFetch;
458
- var thisGeneration = ++self._fetchGeneration;
459
- self._needToFetch = new Map();
460
- var waiting = 0;
461
- var fut = { promise: undefined, resolve: undefined };
462
- fut.promise = new Promise(r => fut.resolve = r);
463
- // This loop is safe, because _currentlyFetching will not be updated
464
- // during this loop (in fact, it is never mutated).
465
- self._currentlyFetching.forEach((op, id) => {
466
- waiting++;
467
- self._mongoHandle._docFetcher.fetch(self._cursorDescription.collectionName, id, op, finishIfNeedToPollQuery((err, doc) => {
468
- try {
469
- if (err) {
470
- console.log("Got exception while fetching documents", err);
471
- // If we get an error from the fetcher (eg, trouble
472
- // connecting to Mongo), let's just abandon the fetch phase
473
- // altogether and fall back to polling. It's not like we're
474
- // getting live updates anyway.
475
- if (self._phase !== PHASE.QUERYING) {
476
- self._needToPollQuery();
477
- }
478
- }
479
- else if (!self._stopped && self._phase === PHASE.FETCHING
480
- && self._fetchGeneration === thisGeneration) {
481
- // We re-check the generation in case we've had an explicit
482
- // _pollQuery call (eg, in another fiber) which should
483
- // effectively cancel this round of fetches. (_pollQuery
484
- // increments the generation.)
485
- self._handleDoc(id, doc);
486
- }
487
- }
488
- finally {
489
- waiting--;
490
- // Because fetch() never calls its callback synchronously,
491
- // this is safe (ie, we won't call fut.return() before the
492
- // forEach is done).
493
- if (waiting === 0)
494
- fut.resolve();
495
- }
496
- }));
497
- });
498
- await fut.promise;
499
- // Exit now if we've had a _pollQuery call (here or in another fiber).
500
- if (self._phase === PHASE.QUERYING)
501
- return;
502
- self._currentlyFetching = null;
503
- }
504
- // We're done fetching, so we can be steady, unless we've had a
505
- // _pollQuery call (here or in another fiber).
506
- if (self._phase !== PHASE.QUERYING)
507
- self._beSteady();
508
- }));
509
- //});
510
- }
511
- _beSteady() {
512
- var self = this;
513
- //Meteor._noYieldsAllowed(function () {
514
- self._registerPhaseChange(PHASE.STEADY);
515
- var writes = self._writesToCommitWhenWeReachSteady;
516
- self._writesToCommitWhenWeReachSteady = [];
517
- self._multiplexer.onFlush(function () {
518
- for (const w of writes) {
519
- w.committed();
520
- }
521
- });
522
- //});
523
- }
524
- _handleOplogEntryQuerying(op) {
525
- var self = this;
526
- //Meteor._noYieldsAllowed(function () {
527
- self._needToFetch.set((0, oplog_tailing_1.idForOp)(op), op);
528
- //});
529
- }
530
- _handleOplogEntrySteadyOrFetching(op) {
531
- var self = this;
532
- //Meteor._noYieldsAllowed(function () {
533
- var id = (0, oplog_tailing_1.idForOp)(op);
534
- // If we're already fetching this one, or about to, we can't optimize;
535
- // make sure that we fetch it again if necessary.
536
- if (self._phase === PHASE.FETCHING &&
537
- ((self._currentlyFetching && self._currentlyFetching.has(id)) ||
538
- self._needToFetch.has(id))) {
539
- self._needToFetch.set(id, op);
540
- return;
541
- }
542
- if (op.op === 'd') {
543
- if (self._published.has(id) ||
544
- (self._limit && self._unpublishedBuffer.has(id)))
545
- self._removeMatching(id);
546
- }
547
- else if (op.op === 'i') {
548
- if (self._published.has(id))
549
- throw new Error("insert found for already-existing ID in published");
550
- if (self._unpublishedBuffer && self._unpublishedBuffer.has(id))
551
- throw new Error("insert found for already-existing ID in buffer");
552
- // XXX what if selector yields? for now it can't but later it could
553
- // have $where
554
- if (self._matcher.documentMatches(op.o).result)
555
- self._addMatching(op.o);
556
- }
557
- else if (op.op === 'u') {
558
- // we are mapping the new oplog format on mongo 5
559
- // to what we know better, $set
560
- op.o = (0, oplog_v2_converter_1.oplogV2V1Converter)(op.o);
561
- // Is this a modifier ($set/$unset, which may require us to poll the
562
- // database to figure out if the whole document matches the selector) or
563
- // a replacement (in which case we can just directly re-evaluate the
564
- // selector)?
565
- // oplog format has changed on mongodb 5, we have to support both now
566
- // diff is the format in Mongo 5+ (oplog v2)
567
- var isReplace = !op.o.hasOwnProperty('$set') && !op.o.hasOwnProperty('diff') && !op.o.hasOwnProperty('$unset');
568
- // If this modifier modifies something inside an EJSON custom type (ie,
569
- // anything with EJSON$), then we can't try to use
570
- // LocalCollection._modify, since that just mutates the EJSON encoding,
571
- // not the actual object.
572
- var canDirectlyModifyDoc = !isReplace && modifierCanBeDirectlyApplied(op.o);
573
- var publishedBefore = self._published.has(id);
574
- var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
575
- if (isReplace) {
576
- self._handleDoc(id, Object.assign({ _id: id }, op.o));
577
- }
578
- else if ((publishedBefore || bufferedBefore) &&
579
- canDirectlyModifyDoc) {
580
- // Oh great, we actually know what the document is, so we can apply
581
- // this directly.
582
- var newDoc = self._published.has(id)
583
- ? self._published.get(id) : self._unpublishedBuffer.get(id);
584
- newDoc = (0, ejson_1.clone)(newDoc);
585
- newDoc._id = id;
586
- try {
587
- (0, minimongo_common_1._modify)(newDoc, op.o);
588
- }
589
- catch (e) {
590
- if (e.name !== "MinimongoError")
591
- throw e;
592
- // We didn't understand the modifier. Re-fetch.
593
- self._needToFetch.set(id, op);
594
- if (self._phase === PHASE.STEADY) {
595
- self._fetchModifiedDocuments();
596
- }
597
- return;
598
- }
599
- self._handleDoc(id, self._sharedProjectionFn(newDoc));
600
- }
601
- else if (!canDirectlyModifyDoc ||
602
- self._matcher.canBecomeTrueByModifier(op.o) ||
603
- (self._sorter && self._sorter.affectedByModifier(op.o))) {
604
- self._needToFetch.set(id, op);
605
- if (self._phase === PHASE.STEADY)
606
- self._fetchModifiedDocuments();
607
- }
608
- }
609
- else {
610
- throw Error("XXX SURPRISING OPERATION: " + op);
611
- }
612
- //});
613
- }
614
- // Yields!
615
- async _runInitialQuery() {
616
- var self = this;
617
- if (self._stopped)
618
- throw new Error("oplog stopped surprisingly early");
619
- await self._runQuery({ initial: true }); // yields
620
- if (self._stopped)
621
- return; // can happen on queryError
622
- // Allow observeChanges calls to return. (After this, it's possible for
623
- // stop() to be called.)
624
- self._multiplexer.ready();
625
- await self._doneQuerying(); // yields
626
- }
627
- // In various circumstances, we may just want to stop processing the oplog and
628
- // re-run the initial query, just as if we were a PollingObserveDriver.
629
- //
630
- // This function may not block, because it is called from an oplog entry
631
- // handler.
632
- //
633
- // XXX We should call this when we detect that we've been in FETCHING for "too
634
- // long".
635
- //
636
- // XXX We should call this when we detect Mongo failover (since that might
637
- // mean that some of the oplog entries we have processed have been rolled
638
- // back). The Node Mongo driver is in the middle of a bunch of huge
639
- // refactorings, including the way that it notifies you when primary
640
- // changes. Will put off implementing this until driver 1.4 is out.
641
- _pollQuery() {
642
- var self = this;
643
- //Meteor._noYieldsAllowed(function () {
644
- if (self._stopped)
645
- return;
646
- // Yay, we get to forget about all the things we thought we had to fetch.
647
- self._needToFetch = new Map();
648
- self._currentlyFetching = null;
649
- ++self._fetchGeneration; // ignore any in-flight fetches
650
- self._registerPhaseChange(PHASE.QUERYING);
651
- // Defer so that we don't yield. We don't need finishIfNeedToPollQuery
652
- // here because SwitchedToQuery is not thrown in QUERYING mode.
653
- setImmediate(async () => {
654
- await self._runQuery();
655
- await self._doneQuerying();
656
- });
657
- //});
658
- }
659
- // Yields!
660
- async _runQuery(options) {
661
- var self = this;
662
- options = options || {};
663
- var newResults, newBuffer;
664
- // This while loop is just to retry failures.
665
- while (true) {
666
- // If we've been stopped, we don't have to run anything any more.
667
- if (self._stopped)
668
- return;
669
- newResults = new Map();
670
- newBuffer = new Map();
671
- // Query 2x documents as the half excluded from the original query will go
672
- // into unpublished buffer to reduce additional Mongo lookups in cases
673
- // when documents are removed from the published set and need a
674
- // replacement.
675
- // XXX needs more thought on non-zero skip
676
- // XXX 2 is a "magic number" meaning there is an extra chunk of docs for
677
- // buffer if such is needed.
678
- var cursor = self._cursorForQuery({ limit: self._limit * 2 });
679
- try {
680
- await cursor.forEach((doc, i) => {
681
- if (!self._limit || i < self._limit) {
682
- newResults.set(doc._id, doc);
683
- }
684
- else {
685
- newBuffer.set(doc._id, doc);
686
- }
687
- });
688
- break;
689
- }
690
- catch (e) {
691
- if (options.initial && typeof (e.code) === 'number') {
692
- // This is an error document sent to us by mongod, not a connection
693
- // error generated by the client. And we've never seen this query work
694
- // successfully. Probably it's a bad selector or something, so we
695
- // should NOT retry. Instead, we should halt the observe (which ends
696
- // up calling `stop` on us).
697
- self._multiplexer.queryError(e);
698
- return;
699
- }
700
- // During failover (eg) if we get an exception we should log and retry
701
- // instead of crashing.
702
- console.log("Got exception while polling query during failover", e);
703
- await (0, oplog_tailing_1._sleepForMs)(100);
704
- }
705
- }
706
- if (self._stopped)
707
- return;
708
- self._publishNewResults(newResults, newBuffer);
709
- }
710
- // Transitions to QUERYING and runs another query, or (if already in QUERYING)
711
- // ensures that we will query again later.
712
- //
713
- // This function may not block, because it is called from an oplog entry
714
- // handler. However, if we were not already in the QUERYING phase, it throws
715
- // an exception that is caught by the closest surrounding
716
- // finishIfNeedToPollQuery call; this ensures that we don't continue running
717
- // close that was designed for another phase inside PHASE.QUERYING.
718
- //
719
- // (It's also necessary whenever logic in this file yields to check that other
720
- // phases haven't put us into QUERYING mode, though; eg,
721
- // _fetchModifiedDocuments does this.)
722
- _needToPollQuery() {
723
- var self = this;
724
- //Meteor._noYieldsAllowed(function () {
725
- if (self._stopped)
726
- return;
727
- // If we're not already in the middle of a query, we can query now
728
- // (possibly pausing FETCHING).
729
- if (self._phase !== PHASE.QUERYING) {
730
- self._pollQuery();
731
- throw new SwitchedToQuery;
732
- }
733
- // We're currently in QUERYING. Set a flag to ensure that we run another
734
- // query when we're done.
735
- self._requeryWhenDoneThisQuery = true;
736
- //});
737
- }
738
- // Yields!
739
- async _doneQuerying() {
740
- var self = this;
741
- if (self._stopped)
742
- return;
743
- await self._mongoHandle._oplogHandle.waitUntilCaughtUp(); // yields
744
- if (self._stopped)
745
- return;
746
- if (self._phase !== PHASE.QUERYING)
747
- throw Error("Phase unexpectedly " + self._phase);
748
- //Meteor._noYieldsAllowed(function () {
749
- if (self._requeryWhenDoneThisQuery) {
750
- self._requeryWhenDoneThisQuery = false;
751
- self._pollQuery();
752
- }
753
- else if (self._needToFetch.size === 0) {
754
- self._beSteady();
755
- }
756
- else {
757
- self._fetchModifiedDocuments();
758
- }
759
- //});
760
- }
761
- _cursorForQuery(optionsOverwrite) {
762
- //return Meteor._noYieldsAllowed(function () {
763
- // The query we run is almost the same as the cursor we are observing,
764
- // with a few changes. We need to read all the fields that are relevant to
765
- // the selector, not just the fields we are going to publish (that's the
766
- // "shared" projection). And we don't want to apply any transform in the
767
- // cursor, because observeChanges shouldn't use the transform.
768
- var options = Object.assign(Object.assign({}, this._cursorDescription.options), optionsOverwrite);
769
- options.projection = this._sharedProjection;
770
- delete options.transform;
771
- // We are NOT deep cloning fields or selector here, which should be OK.
772
- const description = new live_cursor_1.CursorDescription(this._cursorDescription.collectionName, this._cursorDescription.selector, options);
773
- const dbCursor = this._mongoHandle.db.collection(description.collectionName).find(description.selector, options);
774
- return new synchronous_cursor_1.SynchronousCursor(dbCursor, this._cursorDescription, { useTransform: true });
775
- //});
776
- }
777
- // Replace self._published with newResults (both are IdMaps), invoking observe
778
- // callbacks on the multiplexer.
779
- // Replace self._unpublishedBuffer with newBuffer.
780
- //
781
- // XXX This is very similar to LocalCollection._diffQueryUnorderedChanges. We
782
- // should really: (a) Unify IdMap and OrderedDict into Unordered/OrderedDict
783
- // (b) Rewrite diff.js to use these classes instead of arrays and objects.
784
- _publishNewResults(newResults, newBuffer) {
785
- var self = this;
786
- //Meteor._noYieldsAllowed(function () {
787
- // If the query is limited and there is a buffer, shut down so it doesn't
788
- // stay in a way.
789
- if (self._limit) {
790
- self._unpublishedBuffer.clear();
791
- }
792
- // First remove anything that's gone. Be careful not to modify
793
- // self._published while iterating over it.
794
- var idsToRemove = [];
795
- self._published.forEach(function (doc, id) {
796
- if (!newResults.has(id))
797
- idsToRemove.push(id);
798
- });
799
- for (const id of idsToRemove) {
800
- self._removePublished(id);
801
- }
802
- // Now do adds and changes.
803
- // If self has a buffer and limit, the new fetched result will be
804
- // limited correctly as the query has sort specifier.
805
- newResults.forEach(function (doc, id) {
806
- self._handleDoc(id, doc);
807
- });
808
- // Sanity-check that everything we tried to put into _published ended up
809
- // there.
810
- // XXX if this is slow, remove it later
811
- if (self._published.size !== newResults.size) {
812
- console.error('The Mongo server and the Meteor query disagree on how ' +
813
- 'many documents match your query. Cursor description: ', self._cursorDescription);
814
- throw Error("The Mongo server and the Meteor query disagree on how " +
815
- "many documents match your query. Maybe it is hitting a Mongo " +
816
- "edge case? The query is: " +
817
- (0, ejson_1.stringify)(self._cursorDescription.selector));
818
- }
819
- self._published.forEach(function (doc, id) {
820
- if (!newResults.has(id))
821
- throw Error("_published has a doc that newResults doesn't; " + id);
822
- });
823
- // Finally, replace the buffer
824
- newBuffer.forEach(function (doc, id) {
825
- self._addBuffered(id, doc);
826
- });
827
- self._safeAppendToBuffer = newBuffer.size < self._limit;
828
- //});
829
- }
830
- // This stop function is invoked from the onStop of the ObserveMultiplexer, so
831
- // it shouldn't actually be possible to call it until the multiplexer is
832
- // ready.
833
- //
834
- // It's important to check self._stopped after every call in this file that
835
- // can yield!
836
- stop() {
837
- var self = this;
838
- if (self._stopped)
839
- return;
840
- self._stopped = true;
841
- for (const handle of self._stopHandles) {
842
- handle.stop();
843
- }
844
- // Note: we *don't* use multiplexer.onFlush here because this stop
845
- // callback is actually invoked by the multiplexer itself when it has
846
- // determined that there are no handles left. So nothing is actually going
847
- // to get flushed (and it's probably not valid to call methods on the
848
- // dying multiplexer).
849
- for (const w of self._writesToCommitWhenWeReachSteady) {
850
- w.committed(); // maybe yields?
851
- }
852
- self._writesToCommitWhenWeReachSteady = null;
853
- // Proactively drop references to potentially big things.
854
- self._published = null;
855
- self._unpublishedBuffer = null;
856
- self._needToFetch = null;
857
- self._currentlyFetching = null;
858
- // not used?
859
- //self._oplogEntryHandle = null;
860
- //self._listenersHandle = null;
861
- }
862
- _registerPhaseChange(phase) {
863
- var self = this;
864
- //Meteor._noYieldsAllowed(function () {
865
- var now = new Date;
866
- self._phase = phase;
867
- self._phaseStartTime = now;
868
- //});
869
- }
870
- // Does our oplog tailing code support this cursor? For now, we are being very
871
- // conservative and allowing only simple queries with simple options.
872
- // (This is a "static method".)
873
- static cursorSupported(cursorDescription, matcher) {
874
- // First, check the options.
875
- var options = cursorDescription.options;
876
- // Did the user say no explicitly?
877
- if (options.disableOplog)
878
- return false;
879
- // skip is not supported: to support it we would need to keep track of all
880
- // "skipped" documents or at least their ids.
881
- // limit w/o a sort specifier is not supported: current implementation needs a
882
- // deterministic way to order documents.
883
- if (options.skip || (options.limit && !options.sort))
884
- return false;
885
- // If a fields projection option is given check if it is supported by
886
- // minimongo (some operators are not supported).
887
- if (options.projection) {
888
- try {
889
- (0, minimongo_common_1._checkSupportedProjection)(options.projection);
890
- }
891
- catch (e) {
892
- if (e.name === "MinimongoError") {
893
- return false;
894
- }
895
- else {
896
- throw e;
897
- }
898
- }
899
- }
900
- // We don't allow the following selectors:
901
- // - $where (not confident that we provide the same JS environment
902
- // as Mongo, and can yield!)
903
- // - $near (has "interesting" properties in MongoDB, like the possibility
904
- // of returning an ID multiple times, though even polling maybe
905
- // have a bug there)
906
- // XXX: once we support it, we would need to think more on how we
907
- // initialize the comparators when we create the driver.
908
- return !matcher.hasWhere() && !matcher.hasGeoQuery();
909
- }
910
- }
911
- exports.OplogObserveDriver = OplogObserveDriver;
912
- var modifierCanBeDirectlyApplied = function (modifier) {
913
- return Object.entries(modifier).every(([operation, fields]) => {
914
- return Object.entries(fields).every(([field, value]) => {
915
- return !/EJSON\$/.test(field);
916
- });
917
- });
918
- };
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.OplogObserveDriver = void 0;
4
+ const observe_driver_utils_1 = require("./observe_driver_utils");
5
+ const oplog_v2_converter_1 = require("./oplog_v2_converter");
6
+ const live_cursor_1 = require("./live_cursor");
7
+ const writefence_1 = require("../ddp/writefence");
8
+ const diff_1 = require("../diff-sequence/diff");
9
+ const min_max_heap_1 = require("../binary-heap/min_max_heap");
10
+ const max_heap_1 = require("../binary-heap/max_heap");
11
+ const minimongo_common_1 = require("./minimongo_common");
12
+ const ejson_1 = require("../ejson/ejson");
13
+ const oplog_tailing_1 = require("./oplog_tailing");
14
+ const synchronous_cursor_1 = require("./synchronous-cursor");
15
+ var PHASE;
16
+ (function (PHASE) {
17
+ PHASE["QUERYING"] = "QUERYING";
18
+ PHASE["FETCHING"] = "FETCHING";
19
+ PHASE["STEADY"] = "STEADY";
20
+ })(PHASE || (PHASE = {}));
21
+ // Exception thrown by _needToPollQuery which unrolls the stack up to the
22
+ // enclosing call to finishIfNeedToPollQuery.
23
+ var SwitchedToQuery = function () { };
24
+ var finishIfNeedToPollQuery = function (f) {
25
+ return function () {
26
+ try {
27
+ f.apply(this, arguments);
28
+ }
29
+ catch (e) {
30
+ if (!(e instanceof SwitchedToQuery))
31
+ throw e;
32
+ }
33
+ };
34
+ };
35
+ var currentId = 0;
36
+ // OplogObserveDriver is an alternative to PollingObserveDriver which follows
37
+ // the Mongo operation log instead of just re-polling the query. It obeys the
38
+ // same simple interface: constructing it starts sending observeChanges
39
+ // callbacks (and a ready() invocation) to the ObserveMultiplexer, and you stop
40
+ // it by calling the stop() method.
41
+ class OplogObserveDriver {
42
+ constructor(options) {
43
+ var self = this;
44
+ self._usesOplog = true; // tests look at this
45
+ self._id = currentId;
46
+ currentId++;
47
+ self._cursorDescription = options.cursorDescription;
48
+ self._mongoHandle = options.mongoHandle;
49
+ self._multiplexer = options.multiplexer;
50
+ if (options.ordered) {
51
+ throw Error("OplogObserveDriver only supports unordered observeChanges");
52
+ }
53
+ var sorter = options.sorter;
54
+ // We don't support $near and other geo-queries so it's OK to initialize the
55
+ // comparator only once in the constructor.
56
+ var comparator = sorter && sorter.getComparator();
57
+ if (options.cursorDescription.options.limit) {
58
+ // There are several properties ordered driver implements:
59
+ // - _limit is a positive number
60
+ // - _comparator is a function-comparator by which the query is ordered
61
+ // - _unpublishedBuffer is non-null Min/Max Heap,
62
+ // the empty buffer in STEADY phase implies that the
63
+ // everything that matches the queries selector fits
64
+ // into published set.
65
+ // - _published - Max Heap (also implements IdMap methods)
66
+ var heapOptions = { IdMap: Map };
67
+ self._limit = self._cursorDescription.options.limit;
68
+ self._comparator = comparator;
69
+ self._sorter = sorter;
70
+ self._unpublishedBuffer = new min_max_heap_1.MinMaxHeap(comparator, heapOptions);
71
+ // We need something that can find Max value in addition to IdMap interface
72
+ self._published = new max_heap_1.MaxHeap(comparator, heapOptions);
73
+ }
74
+ else {
75
+ self._limit = 0;
76
+ self._comparator = null;
77
+ self._sorter = null;
78
+ self._unpublishedBuffer = null;
79
+ self._published = new Map();
80
+ }
81
+ // Indicates if it is safe to insert a new document at the end of the buffer
82
+ // for this query. i.e. it is known that there are no documents matching the
83
+ // selector those are not in published or buffer.
84
+ self._safeAppendToBuffer = false;
85
+ self._stopped = false;
86
+ self._stopHandles = [];
87
+ self._registerPhaseChange(PHASE.QUERYING);
88
+ self._matcher = options.matcher;
89
+ // we are now using projection, not fields in the cursor description even if you pass {fields}
90
+ // in the cursor construction
91
+ var projection = self._cursorDescription.options.projection || {};
92
+ self._projectionFn = (0, minimongo_common_1._compileProjection)(projection);
93
+ // Projection function, result of combining important fields for selector and
94
+ // existing fields projection
95
+ self._sharedProjection = self._matcher.combineIntoProjection(projection);
96
+ if (sorter)
97
+ self._sharedProjection = sorter.combineIntoProjection(self._sharedProjection);
98
+ self._sharedProjectionFn = (0, minimongo_common_1._compileProjection)(self._sharedProjection);
99
+ self._needToFetch = new Map();
100
+ self._currentlyFetching = null;
101
+ self._fetchGeneration = 0;
102
+ self._requeryWhenDoneThisQuery = false;
103
+ self._writesToCommitWhenWeReachSteady = [];
104
+ // If the oplog handle tells us that it skipped some entries (because it got
105
+ // behind, say), re-poll.
106
+ self._stopHandles.push(self._mongoHandle._oplogHandle.onSkippedEntries(finishIfNeedToPollQuery(function () {
107
+ self._needToPollQuery();
108
+ })));
109
+ (0, observe_driver_utils_1.forEachTrigger)(self._cursorDescription, async (trigger) => {
110
+ self._stopHandles.push(await self._mongoHandle._oplogHandle.onOplogEntry(trigger, function (notification) {
111
+ //Meteor._noYieldsAllowed(finishIfNeedToPollQuery(function () {
112
+ var op = notification.op;
113
+ if (notification.dropCollection || notification.dropDatabase) {
114
+ // Note: this call is not allowed to block on anything (especially
115
+ // on waiting for oplog entries to catch up) because that will block
116
+ // onOplogEntry!
117
+ self._needToPollQuery();
118
+ }
119
+ else {
120
+ // All other operators should be handled depending on phase
121
+ if (self._phase === PHASE.QUERYING) {
122
+ self._handleOplogEntryQuerying(op);
123
+ }
124
+ else {
125
+ self._handleOplogEntrySteadyOrFetching(op);
126
+ }
127
+ }
128
+ //}));
129
+ }));
130
+ });
131
+ // XXX ordering w.r.t. everything else?
132
+ self._stopHandles.push((0, observe_driver_utils_1.listenAll)(self._cursorDescription, function (notification) {
133
+ // If we're not in a pre-fire write fence, we don't have to do anything.
134
+ var fence = writefence_1._WriteFence._CurrentWriteFence;
135
+ if (!fence || fence.fired)
136
+ return;
137
+ if (fence._oplogObserveDrivers) {
138
+ fence._oplogObserveDrivers[self._id] = self;
139
+ return;
140
+ }
141
+ fence._oplogObserveDrivers = {};
142
+ fence._oplogObserveDrivers[self._id] = self;
143
+ fence.onBeforeFire(async () => {
144
+ var drivers = fence._oplogObserveDrivers;
145
+ delete fence._oplogObserveDrivers;
146
+ // This fence cannot fire until we've caught up to "this point" in the
147
+ // oplog, and all observers made it back to the steady state.
148
+ await self._mongoHandle._oplogHandle.waitUntilCaughtUp();
149
+ for (const driver of Object.values(drivers)) {
150
+ if (driver._stopped)
151
+ return;
152
+ var write = fence.beginWrite();
153
+ if (driver._phase === PHASE.STEADY) {
154
+ // Make sure that all of the callbacks have made it through the
155
+ // multiplexer and been delivered to ObserveHandles before committing
156
+ // writes.
157
+ driver._multiplexer.onFlush(function () {
158
+ write.committed();
159
+ });
160
+ }
161
+ else {
162
+ driver._writesToCommitWhenWeReachSteady.push(write);
163
+ }
164
+ }
165
+ });
166
+ }));
167
+ // When Mongo fails over, we need to repoll the query, in case we processed an
168
+ // oplog entry that got rolled back.
169
+ self._stopHandles.push(self._mongoHandle._onFailover(finishIfNeedToPollQuery(function () {
170
+ self._needToPollQuery();
171
+ })));
172
+ // Give _observeChanges a chance to add the new ObserveHandle to our
173
+ // multiplexer, so that the added calls get streamed.
174
+ setImmediate(finishIfNeedToPollQuery(function () {
175
+ self._runInitialQuery();
176
+ }));
177
+ }
178
+ _addPublished(id, doc) {
179
+ var self = this;
180
+ //Meteor._noYieldsAllowed(function () {
181
+ var fields = Object.assign({}, doc);
182
+ delete fields._id;
183
+ self._published.set(id, self._sharedProjectionFn(doc));
184
+ self._multiplexer.added(id, self._projectionFn(fields));
185
+ // After adding this document, the published set might be overflowed
186
+ // (exceeding capacity specified by limit). If so, push the maximum
187
+ // element to the buffer, we might want to save it in memory to reduce the
188
+ // amount of Mongo lookups in the future.
189
+ if (self._limit && self._published.size > self._limit) {
190
+ // XXX in theory the size of published is no more than limit+1
191
+ if (self._published.size !== self._limit + 1) {
192
+ throw new Error("After adding to published, " +
193
+ (self._published.size - self._limit) +
194
+ " documents are overflowing the set");
195
+ }
196
+ var overflowingDocId = self._published.maxElementId();
197
+ var overflowingDoc = self._published.get(overflowingDocId);
198
+ if ((0, ejson_1.equals)(overflowingDocId, id)) {
199
+ throw new Error("The document just added is overflowing the published set");
200
+ }
201
+ self._published.delete(overflowingDocId);
202
+ self._multiplexer.removed(overflowingDocId);
203
+ self._addBuffered(overflowingDocId, overflowingDoc);
204
+ }
205
+ //});
206
+ }
207
+ _removePublished(id) {
208
+ var self = this;
209
+ //Meteor._noYieldsAllowed(function () {
210
+ self._published.delete(id);
211
+ self._multiplexer.removed(id);
212
+ if (!self._limit || self._published.size === self._limit)
213
+ return;
214
+ if (self._published.size > self._limit)
215
+ throw Error("self._published got too big");
216
+ // OK, we are publishing less than the limit. Maybe we should look in the
217
+ // buffer to find the next element past what we were publishing before.
218
+ if (!self._unpublishedBuffer.empty()) {
219
+ // There's something in the buffer; move the first thing in it to
220
+ // _published.
221
+ var newDocId = self._unpublishedBuffer.minElementId();
222
+ var newDoc = self._unpublishedBuffer.get(newDocId);
223
+ self._removeBuffered(newDocId);
224
+ self._addPublished(newDocId, newDoc);
225
+ return;
226
+ }
227
+ // There's nothing in the buffer. This could mean one of a few things.
228
+ // (a) We could be in the middle of re-running the query (specifically, we
229
+ // could be in _publishNewResults). In that case, _unpublishedBuffer is
230
+ // empty because we clear it at the beginning of _publishNewResults. In
231
+ // this case, our caller already knows the entire answer to the query and
232
+ // we don't need to do anything fancy here. Just return.
233
+ if (self._phase === PHASE.QUERYING)
234
+ return;
235
+ // (b) We're pretty confident that the union of _published and
236
+ // _unpublishedBuffer contain all documents that match selector. Because
237
+ // _unpublishedBuffer is empty, that means we're confident that _published
238
+ // contains all documents that match selector. So we have nothing to do.
239
+ if (self._safeAppendToBuffer)
240
+ return;
241
+ // (c) Maybe there are other documents out there that should be in our
242
+ // buffer. But in that case, when we emptied _unpublishedBuffer in
243
+ // _removeBuffered, we should have called _needToPollQuery, which will
244
+ // either put something in _unpublishedBuffer or set _safeAppendToBuffer
245
+ // (or both), and it will put us in QUERYING for that whole time. So in
246
+ // fact, we shouldn't be able to get here.
247
+ throw new Error("Buffer inexplicably empty");
248
+ //});
249
+ }
250
+ _changePublished(id, oldDoc, newDoc) {
251
+ var self = this;
252
+ //Meteor._noYieldsAllowed(function () {
253
+ self._published.set(id, self._sharedProjectionFn(newDoc));
254
+ var projectedNew = self._projectionFn(newDoc);
255
+ var projectedOld = self._projectionFn(oldDoc);
256
+ var changed = diff_1.DiffSequence.makeChangedFields(projectedNew, projectedOld);
257
+ if (Object.keys(changed).length !== 0)
258
+ self._multiplexer.changed(id, changed);
259
+ //});
260
+ }
261
+ _addBuffered(id, doc) {
262
+ var self = this;
263
+ //Meteor._noYieldsAllowed(function () {
264
+ self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc));
265
+ // If something is overflowing the buffer, we just remove it from cache
266
+ if (self._unpublishedBuffer.size > self._limit) {
267
+ var maxBufferedId = self._unpublishedBuffer.maxElementId();
268
+ self._unpublishedBuffer.delete(maxBufferedId);
269
+ // Since something matching is removed from cache (both published set and
270
+ // buffer), set flag to false
271
+ self._safeAppendToBuffer = false;
272
+ }
273
+ //});
274
+ }
275
+ // Is called either to remove the doc completely from matching set or to move
276
+ // it to the published set later.
277
+ _removeBuffered(id) {
278
+ var self = this;
279
+ //Meteor._noYieldsAllowed(function () {
280
+ self._unpublishedBuffer.delete(id);
281
+ // To keep the contract "buffer is never empty in STEADY phase unless the
282
+ // everything matching fits into published" true, we poll everything as
283
+ // soon as we see the buffer becoming empty.
284
+ if (!self._unpublishedBuffer.size && !self._safeAppendToBuffer)
285
+ self._needToPollQuery();
286
+ //});
287
+ }
288
+ // Called when a document has joined the "Matching" results set.
289
+ // Takes responsibility of keeping _unpublishedBuffer in sync with _published
290
+ // and the effect of limit enforced.
291
+ _addMatching(doc) {
292
+ var self = this;
293
+ //Meteor._noYieldsAllowed(function () {
294
+ var id = doc._id;
295
+ if (self._published.has(id))
296
+ throw Error("tried to add something already published " + id);
297
+ if (self._limit && self._unpublishedBuffer.has(id))
298
+ throw Error("tried to add something already existed in buffer " + id);
299
+ var limit = self._limit;
300
+ var comparator = self._comparator;
301
+ var maxPublished = (limit && self._published.size > 0)
302
+ ? self._published.get(self._published.maxElementId()) // published is MaxHeap because limit is defined
303
+ : null;
304
+ var maxBuffered = (limit && self._unpublishedBuffer.size > 0)
305
+ ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId())
306
+ : null;
307
+ // The query is unlimited or didn't publish enough documents yet or the
308
+ // new document would fit into published set pushing the maximum element
309
+ // out, then we need to publish the doc.
310
+ var toPublish = !limit || self._published.size < limit ||
311
+ comparator(doc, maxPublished) < 0;
312
+ // Otherwise we might need to buffer it (only in case of limited query).
313
+ // Buffering is allowed if the buffer is not filled up yet and all
314
+ // matching docs are either in the published set or in the buffer.
315
+ var canAppendToBuffer = !toPublish && self._safeAppendToBuffer &&
316
+ self._unpublishedBuffer.size < limit;
317
+ // Or if it is small enough to be safely inserted to the middle or the
318
+ // beginning of the buffer.
319
+ var canInsertIntoBuffer = !toPublish && maxBuffered &&
320
+ comparator(doc, maxBuffered) <= 0;
321
+ var toBuffer = canAppendToBuffer || canInsertIntoBuffer;
322
+ if (toPublish) {
323
+ self._addPublished(id, doc);
324
+ }
325
+ else if (toBuffer) {
326
+ self._addBuffered(id, doc);
327
+ }
328
+ else {
329
+ // dropping it and not saving to the cache
330
+ self._safeAppendToBuffer = false;
331
+ }
332
+ //});
333
+ }
334
+ // Called when a document leaves the "Matching" results set.
335
+ // Takes responsibility of keeping _unpublishedBuffer in sync with _published
336
+ // and the effect of limit enforced.
337
+ _removeMatching(id) {
338
+ var self = this;
339
+ //Meteor._noYieldsAllowed(function () {
340
+ if (!self._published.has(id) && !self._limit)
341
+ throw Error("tried to remove something matching but not cached " + id);
342
+ if (self._published.has(id)) {
343
+ self._removePublished(id);
344
+ }
345
+ else if (self._unpublishedBuffer.has(id)) {
346
+ self._removeBuffered(id);
347
+ }
348
+ //});
349
+ }
350
+ _handleDoc(id, newDoc) {
351
+ var self = this;
352
+ //Meteor._noYieldsAllowed(function () {
353
+ var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result;
354
+ var publishedBefore = self._published.has(id);
355
+ var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
356
+ var cachedBefore = publishedBefore || bufferedBefore;
357
+ if (matchesNow && !cachedBefore) {
358
+ self._addMatching(newDoc);
359
+ }
360
+ else if (cachedBefore && !matchesNow) {
361
+ self._removeMatching(id);
362
+ }
363
+ else if (cachedBefore && matchesNow) {
364
+ var oldDoc = self._published.get(id);
365
+ var comparator = self._comparator;
366
+ var minBuffered = self._limit && self._unpublishedBuffer.size &&
367
+ self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId());
368
+ var maxBuffered;
369
+ if (publishedBefore) {
370
+ // Unlimited case where the document stays in published once it
371
+ // matches or the case when we don't have enough matching docs to
372
+ // publish or the changed but matching doc will stay in published
373
+ // anyways.
374
+ //
375
+ // XXX: We rely on the emptiness of buffer. Be sure to maintain the
376
+ // fact that buffer can't be empty if there are matching documents not
377
+ // published. Notably, we don't want to schedule repoll and continue
378
+ // relying on this property.
379
+ var staysInPublished = !self._limit ||
380
+ self._unpublishedBuffer.size === 0 ||
381
+ comparator(newDoc, minBuffered) <= 0;
382
+ if (staysInPublished) {
383
+ self._changePublished(id, oldDoc, newDoc);
384
+ }
385
+ else {
386
+ // after the change doc doesn't stay in the published, remove it
387
+ self._removePublished(id);
388
+ // but it can move into buffered now, check it
389
+ maxBuffered = self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
390
+ var toBuffer = self._safeAppendToBuffer ||
391
+ (maxBuffered && comparator(newDoc, maxBuffered) <= 0);
392
+ if (toBuffer) {
393
+ self._addBuffered(id, newDoc);
394
+ }
395
+ else {
396
+ // Throw away from both published set and buffer
397
+ self._safeAppendToBuffer = false;
398
+ }
399
+ }
400
+ }
401
+ else if (bufferedBefore) {
402
+ oldDoc = self._unpublishedBuffer.get(id);
403
+ // remove the old version manually instead of using _removeBuffered so
404
+ // we don't trigger the querying immediately. if we end this block
405
+ // with the buffer empty, we will need to trigger the query poll
406
+ // manually too.
407
+ self._unpublishedBuffer.delete(id);
408
+ // published is MaxHeap because bufferedBefore is only set when limit is defined
409
+ var maxPublished = self._published.get(self._published.maxElementId());
410
+ maxBuffered = self._unpublishedBuffer.size && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
411
+ // the buffered doc was updated, it could move to published
412
+ var toPublish = comparator(newDoc, maxPublished) < 0;
413
+ // or stays in buffer even after the change
414
+ var staysInBuffer = (!toPublish && self._safeAppendToBuffer) ||
415
+ (!toPublish && maxBuffered &&
416
+ comparator(newDoc, maxBuffered) <= 0);
417
+ if (toPublish) {
418
+ self._addPublished(id, newDoc);
419
+ }
420
+ else if (staysInBuffer) {
421
+ // stays in buffer but changes
422
+ self._unpublishedBuffer.set(id, newDoc);
423
+ }
424
+ else {
425
+ // Throw away from both published set and buffer
426
+ self._safeAppendToBuffer = false;
427
+ // Normally this check would have been done in _removeBuffered but
428
+ // we didn't use it, so we need to do it ourself now.
429
+ if (!self._unpublishedBuffer.size) {
430
+ self._needToPollQuery();
431
+ }
432
+ }
433
+ }
434
+ else {
435
+ throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true.");
436
+ }
437
+ }
438
+ //});
439
+ }
440
    // Drains the _needToFetch queue: fetches each queued document from Mongo
    // (via the shared DocFetcher), routes every result through _handleDoc,
    // and — if no QUERYING transition interrupted us — settles into STEADY.
    _fetchModifiedDocuments() {
        var self = this;
        //Meteor._noYieldsAllowed(function () {
        self._registerPhaseChange(PHASE.FETCHING);
        // Defer, because nothing called from the oplog entry handler may yield,
        // but fetch() yields.
        setImmediate(finishIfNeedToPollQuery(async () => {
            while (!self._stopped && self._needToFetch.size > 0) {
                if (self._phase === PHASE.QUERYING) {
                    // While fetching, we decided to go into QUERYING mode, and then we
                    // saw another oplog entry, so _needToFetch is not empty. But we
                    // shouldn't fetch these documents until AFTER the query is done.
                    break;
                }
                // Being in steady phase here would be surprising.
                if (self._phase !== PHASE.FETCHING)
                    throw new Error("phase in fetchModifiedDocuments: " + self._phase);
                // Swap the queue out so oplog entries arriving during this round
                // accumulate in a fresh Map for the next iteration.
                self._currentlyFetching = self._needToFetch;
                var thisGeneration = ++self._fetchGeneration;
                self._needToFetch = new Map();
                // Counts outstanding fetch callbacks; fut resolves when the last
                // one finishes.
                var waiting = 0;
                var fut = { promise: undefined, resolve: undefined };
                fut.promise = new Promise(r => fut.resolve = r);
                // This loop is safe, because _currentlyFetching will not be updated
                // during this loop (in fact, it is never mutated).
                self._currentlyFetching.forEach((op, id) => {
                    waiting++;
                    self._mongoHandle._docFetcher.fetch(self._cursorDescription.collectionName, id, op, finishIfNeedToPollQuery((err, doc) => {
                        try {
                            if (err) {
                                console.log("Got exception while fetching documents", err);
                                // If we get an error from the fetcher (eg, trouble
                                // connecting to Mongo), let's just abandon the fetch phase
                                // altogether and fall back to polling. It's not like we're
                                // getting live updates anyway.
                                if (self._phase !== PHASE.QUERYING) {
                                    self._needToPollQuery();
                                }
                            }
                            else if (!self._stopped && self._phase === PHASE.FETCHING
                                && self._fetchGeneration === thisGeneration) {
                                // We re-check the generation in case we've had an explicit
                                // _pollQuery call (eg, in another fiber) which should
                                // effectively cancel this round of fetches. (_pollQuery
                                // increments the generation.)
                                self._handleDoc(id, doc);
                            }
                        }
                        finally {
                            waiting--;
                            // Because fetch() never calls its callback synchronously,
                            // this is safe (ie, we won't call fut.return() before the
                            // forEach is done).
                            // NOTE(review): this relies on DocFetcher.fetch always being
                            // asynchronous — confirm against its implementation.
                            if (waiting === 0)
                                fut.resolve();
                        }
                    }));
                });
                await fut.promise;
                // Exit now if we've had a _pollQuery call (here or in another fiber).
                if (self._phase === PHASE.QUERYING)
                    return;
                self._currentlyFetching = null;
            }
            // We're done fetching, so we can be steady, unless we've had a
            // _pollQuery call (here or in another fiber).
            if (self._phase !== PHASE.QUERYING)
                self._beSteady();
        }));
        //});
    }
511
+ _beSteady() {
512
+ var self = this;
513
+ //Meteor._noYieldsAllowed(function () {
514
+ self._registerPhaseChange(PHASE.STEADY);
515
+ var writes = self._writesToCommitWhenWeReachSteady;
516
+ self._writesToCommitWhenWeReachSteady = [];
517
+ self._multiplexer.onFlush(function () {
518
+ for (const w of writes) {
519
+ w.committed();
520
+ }
521
+ });
522
+ //});
523
+ }
524
+ _handleOplogEntryQuerying(op) {
525
+ var self = this;
526
+ //Meteor._noYieldsAllowed(function () {
527
+ self._needToFetch.set((0, oplog_tailing_1.idForOp)(op), op);
528
+ //});
529
+ }
530
+ _handleOplogEntrySteadyOrFetching(op) {
531
+ var self = this;
532
+ //Meteor._noYieldsAllowed(function () {
533
+ var id = (0, oplog_tailing_1.idForOp)(op);
534
+ // If we're already fetching this one, or about to, we can't optimize;
535
+ // make sure that we fetch it again if necessary.
536
+ if (self._phase === PHASE.FETCHING &&
537
+ ((self._currentlyFetching && self._currentlyFetching.has(id)) ||
538
+ self._needToFetch.has(id))) {
539
+ self._needToFetch.set(id, op);
540
+ return;
541
+ }
542
+ if (op.op === 'd') {
543
+ if (self._published.has(id) ||
544
+ (self._limit && self._unpublishedBuffer.has(id)))
545
+ self._removeMatching(id);
546
+ }
547
+ else if (op.op === 'i') {
548
+ if (self._published.has(id))
549
+ throw new Error("insert found for already-existing ID in published");
550
+ if (self._unpublishedBuffer && self._unpublishedBuffer.has(id))
551
+ throw new Error("insert found for already-existing ID in buffer");
552
+ // XXX what if selector yields? for now it can't but later it could
553
+ // have $where
554
+ if (self._matcher.documentMatches(op.o).result)
555
+ self._addMatching(op.o);
556
+ }
557
+ else if (op.op === 'u') {
558
+ // we are mapping the new oplog format on mongo 5
559
+ // to what we know better, $set
560
+ op.o = (0, oplog_v2_converter_1.oplogV2V1Converter)(op.o);
561
+ // Is this a modifier ($set/$unset, which may require us to poll the
562
+ // database to figure out if the whole document matches the selector) or
563
+ // a replacement (in which case we can just directly re-evaluate the
564
+ // selector)?
565
+ // oplog format has changed on mongodb 5, we have to support both now
566
+ // diff is the format in Mongo 5+ (oplog v2)
567
+ var isReplace = !op.o.hasOwnProperty('$set') && !op.o.hasOwnProperty('diff') && !op.o.hasOwnProperty('$unset');
568
+ // If this modifier modifies something inside an EJSON custom type (ie,
569
+ // anything with EJSON$), then we can't try to use
570
+ // LocalCollection._modify, since that just mutates the EJSON encoding,
571
+ // not the actual object.
572
+ var canDirectlyModifyDoc = !isReplace && modifierCanBeDirectlyApplied(op.o);
573
+ var publishedBefore = self._published.has(id);
574
+ var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
575
+ if (isReplace) {
576
+ self._handleDoc(id, Object.assign({ _id: id }, op.o));
577
+ }
578
+ else if ((publishedBefore || bufferedBefore) &&
579
+ canDirectlyModifyDoc) {
580
+ // Oh great, we actually know what the document is, so we can apply
581
+ // this directly.
582
+ var newDoc = self._published.has(id)
583
+ ? self._published.get(id) : self._unpublishedBuffer.get(id);
584
+ newDoc = (0, ejson_1.clone)(newDoc);
585
+ newDoc._id = id;
586
+ try {
587
+ (0, minimongo_common_1._modify)(newDoc, op.o);
588
+ }
589
+ catch (e) {
590
+ if (e.name !== "MinimongoError")
591
+ throw e;
592
+ // We didn't understand the modifier. Re-fetch.
593
+ self._needToFetch.set(id, op);
594
+ if (self._phase === PHASE.STEADY) {
595
+ self._fetchModifiedDocuments();
596
+ }
597
+ return;
598
+ }
599
+ self._handleDoc(id, self._sharedProjectionFn(newDoc));
600
+ }
601
+ else if (!canDirectlyModifyDoc ||
602
+ self._matcher.canBecomeTrueByModifier(op.o) ||
603
+ (self._sorter && self._sorter.affectedByModifier(op.o))) {
604
+ self._needToFetch.set(id, op);
605
+ if (self._phase === PHASE.STEADY)
606
+ self._fetchModifiedDocuments();
607
+ }
608
+ }
609
+ else {
610
+ throw Error("XXX SURPRISING OPERATION: " + op);
611
+ }
612
+ //});
613
+ }
614
+ // Yields!
615
+ async _runInitialQuery() {
616
+ var self = this;
617
+ if (self._stopped)
618
+ throw new Error("oplog stopped surprisingly early");
619
+ await self._runQuery({ initial: true }); // yields
620
+ if (self._stopped)
621
+ return; // can happen on queryError
622
+ // Allow observeChanges calls to return. (After this, it's possible for
623
+ // stop() to be called.)
624
+ self._multiplexer.ready();
625
+ await self._doneQuerying(); // yields
626
+ }
627
+ // In various circumstances, we may just want to stop processing the oplog and
628
+ // re-run the initial query, just as if we were a PollingObserveDriver.
629
+ //
630
+ // This function may not block, because it is called from an oplog entry
631
+ // handler.
632
+ //
633
+ // XXX We should call this when we detect that we've been in FETCHING for "too
634
+ // long".
635
+ //
636
+ // XXX We should call this when we detect Mongo failover (since that might
637
+ // mean that some of the oplog entries we have processed have been rolled
638
+ // back). The Node Mongo driver is in the middle of a bunch of huge
639
+ // refactorings, including the way that it notifies you when primary
640
+ // changes. Will put off implementing this until driver 1.4 is out.
641
+ _pollQuery() {
642
+ var self = this;
643
+ //Meteor._noYieldsAllowed(function () {
644
+ if (self._stopped)
645
+ return;
646
+ // Yay, we get to forget about all the things we thought we had to fetch.
647
+ self._needToFetch = new Map();
648
+ self._currentlyFetching = null;
649
+ ++self._fetchGeneration; // ignore any in-flight fetches
650
+ self._registerPhaseChange(PHASE.QUERYING);
651
+ // Defer so that we don't yield. We don't need finishIfNeedToPollQuery
652
+ // here because SwitchedToQuery is not thrown in QUERYING mode.
653
+ setImmediate(async () => {
654
+ await self._runQuery();
655
+ await self._doneQuerying();
656
+ });
657
+ //});
658
+ }
659
    // Yields!
    // Runs the underlying Mongo query (fetching up to 2x limit documents so
    // the excess half can seed the unpublished buffer) and publishes the
    // results. Retries forever on transient errors; on a server-side error
    // during the initial run it aborts via multiplexer.queryError instead.
    async _runQuery(options) {
        var self = this;
        options = options || {};
        var newResults, newBuffer;
        // This while loop is just to retry failures.
        while (true) {
            // If we've been stopped, we don't have to run anything any more.
            if (self._stopped)
                return;
            newResults = new Map();
            newBuffer = new Map();
            // Query 2x documents as the half excluded from the original query will go
            // into unpublished buffer to reduce additional Mongo lookups in cases
            // when documents are removed from the published set and need a
            // replacement.
            // XXX needs more thought on non-zero skip
            // XXX 2 is a "magic number" meaning there is an extra chunk of docs for
            // buffer if such is needed.
            // NOTE(review): with no limit, self._limit * 2 is NaN — presumably
            // the cursor layer treats that as "no limit"; confirm.
            var cursor = self._cursorForQuery({ limit: self._limit * 2 });
            try {
                // First _limit docs go to the published set, the rest to the buffer.
                await cursor.forEach((doc, i) => {
                    if (!self._limit || i < self._limit) {
                        newResults.set(doc._id, doc);
                    }
                    else {
                        newBuffer.set(doc._id, doc);
                    }
                });
                break;
            }
            catch (e) {
                if (options.initial && typeof (e.code) === 'number') {
                    // This is an error document sent to us by mongod, not a connection
                    // error generated by the client. And we've never seen this query work
                    // successfully. Probably it's a bad selector or something, so we
                    // should NOT retry. Instead, we should halt the observe (which ends
                    // up calling `stop` on us).
                    self._multiplexer.queryError(e);
                    return;
                }
                // During failover (eg) if we get an exception we should log and retry
                // instead of crashing.
                console.log("Got exception while polling query during failover", e);
                await (0, oplog_tailing_1._sleepForMs)(100);
            }
        }
        if (self._stopped)
            return;
        self._publishNewResults(newResults, newBuffer);
    }
710
+ // Transitions to QUERYING and runs another query, or (if already in QUERYING)
711
+ // ensures that we will query again later.
712
+ //
713
+ // This function may not block, because it is called from an oplog entry
714
+ // handler. However, if we were not already in the QUERYING phase, it throws
715
+ // an exception that is caught by the closest surrounding
716
+ // finishIfNeedToPollQuery call; this ensures that we don't continue running
717
+ // close that was designed for another phase inside PHASE.QUERYING.
718
+ //
719
+ // (It's also necessary whenever logic in this file yields to check that other
720
+ // phases haven't put us into QUERYING mode, though; eg,
721
+ // _fetchModifiedDocuments does this.)
722
+ _needToPollQuery() {
723
+ var self = this;
724
+ //Meteor._noYieldsAllowed(function () {
725
+ if (self._stopped)
726
+ return;
727
+ // If we're not already in the middle of a query, we can query now
728
+ // (possibly pausing FETCHING).
729
+ if (self._phase !== PHASE.QUERYING) {
730
+ self._pollQuery();
731
+ throw new SwitchedToQuery;
732
+ }
733
+ // We're currently in QUERYING. Set a flag to ensure that we run another
734
+ // query when we're done.
735
+ self._requeryWhenDoneThisQuery = true;
736
+ //});
737
+ }
738
+ // Yields!
739
+ async _doneQuerying() {
740
+ var self = this;
741
+ if (self._stopped)
742
+ return;
743
+ await self._mongoHandle._oplogHandle.waitUntilCaughtUp(); // yields
744
+ if (self._stopped)
745
+ return;
746
+ if (self._phase !== PHASE.QUERYING)
747
+ throw Error("Phase unexpectedly " + self._phase);
748
+ //Meteor._noYieldsAllowed(function () {
749
+ if (self._requeryWhenDoneThisQuery) {
750
+ self._requeryWhenDoneThisQuery = false;
751
+ self._pollQuery();
752
+ }
753
+ else if (self._needToFetch.size === 0) {
754
+ self._beSteady();
755
+ }
756
+ else {
757
+ self._fetchModifiedDocuments();
758
+ }
759
+ //});
760
+ }
761
+ _cursorForQuery(optionsOverwrite) {
762
+ //return Meteor._noYieldsAllowed(function () {
763
+ // The query we run is almost the same as the cursor we are observing,
764
+ // with a few changes. We need to read all the fields that are relevant to
765
+ // the selector, not just the fields we are going to publish (that's the
766
+ // "shared" projection). And we don't want to apply any transform in the
767
+ // cursor, because observeChanges shouldn't use the transform.
768
+ var options = Object.assign(Object.assign({}, this._cursorDescription.options), optionsOverwrite);
769
+ options.projection = this._sharedProjection;
770
+ delete options.transform;
771
+ // We are NOT deep cloning fields or selector here, which should be OK.
772
+ const description = new live_cursor_1.CursorDescription(this._cursorDescription.collectionName, this._cursorDescription.selector, options);
773
+ const dbCursor = this._mongoHandle.db.collection(description.collectionName).find(description.selector, options);
774
+ return new synchronous_cursor_1.SynchronousCursor(dbCursor, this._cursorDescription, { useTransform: true });
775
+ //});
776
+ }
777
+ // Replace self._published with newResults (both are IdMaps), invoking observe
778
+ // callbacks on the multiplexer.
779
+ // Replace self._unpublishedBuffer with newBuffer.
780
+ //
781
+ // XXX This is very similar to LocalCollection._diffQueryUnorderedChanges. We
782
+ // should really: (a) Unify IdMap and OrderedDict into Unordered/OrderedDict
783
+ // (b) Rewrite diff.js to use these classes instead of arrays and objects.
784
+ _publishNewResults(newResults, newBuffer) {
785
+ var self = this;
786
+ //Meteor._noYieldsAllowed(function () {
787
+ // If the query is limited and there is a buffer, shut down so it doesn't
788
+ // stay in a way.
789
+ if (self._limit) {
790
+ self._unpublishedBuffer.clear();
791
+ }
792
+ // First remove anything that's gone. Be careful not to modify
793
+ // self._published while iterating over it.
794
+ var idsToRemove = [];
795
+ self._published.forEach(function (doc, id) {
796
+ if (!newResults.has(id))
797
+ idsToRemove.push(id);
798
+ });
799
+ for (const id of idsToRemove) {
800
+ self._removePublished(id);
801
+ }
802
+ // Now do adds and changes.
803
+ // If self has a buffer and limit, the new fetched result will be
804
+ // limited correctly as the query has sort specifier.
805
+ newResults.forEach(function (doc, id) {
806
+ self._handleDoc(id, doc);
807
+ });
808
+ // Sanity-check that everything we tried to put into _published ended up
809
+ // there.
810
+ // XXX if this is slow, remove it later
811
+ if (self._published.size !== newResults.size) {
812
+ console.error('The Mongo server and the Meteor query disagree on how ' +
813
+ 'many documents match your query. Cursor description: ', self._cursorDescription);
814
+ throw Error("The Mongo server and the Meteor query disagree on how " +
815
+ "many documents match your query. Maybe it is hitting a Mongo " +
816
+ "edge case? The query is: " +
817
+ (0, ejson_1.stringify)(self._cursorDescription.selector));
818
+ }
819
+ self._published.forEach(function (doc, id) {
820
+ if (!newResults.has(id))
821
+ throw Error("_published has a doc that newResults doesn't; " + id);
822
+ });
823
+ // Finally, replace the buffer
824
+ newBuffer.forEach(function (doc, id) {
825
+ self._addBuffered(id, doc);
826
+ });
827
+ self._safeAppendToBuffer = newBuffer.size < self._limit;
828
+ //});
829
+ }
830
+ // This stop function is invoked from the onStop of the ObserveMultiplexer, so
831
+ // it shouldn't actually be possible to call it until the multiplexer is
832
+ // ready.
833
+ //
834
+ // It's important to check self._stopped after every call in this file that
835
+ // can yield!
836
+ stop() {
837
+ var self = this;
838
+ if (self._stopped)
839
+ return;
840
+ self._stopped = true;
841
+ for (const handle of self._stopHandles) {
842
+ handle.stop();
843
+ }
844
+ // Note: we *don't* use multiplexer.onFlush here because this stop
845
+ // callback is actually invoked by the multiplexer itself when it has
846
+ // determined that there are no handles left. So nothing is actually going
847
+ // to get flushed (and it's probably not valid to call methods on the
848
+ // dying multiplexer).
849
+ for (const w of self._writesToCommitWhenWeReachSteady) {
850
+ w.committed(); // maybe yields?
851
+ }
852
+ self._writesToCommitWhenWeReachSteady = null;
853
+ // Proactively drop references to potentially big things.
854
+ self._published = null;
855
+ self._unpublishedBuffer = null;
856
+ self._needToFetch = null;
857
+ self._currentlyFetching = null;
858
+ // not used?
859
+ //self._oplogEntryHandle = null;
860
+ //self._listenersHandle = null;
861
+ }
862
+ _registerPhaseChange(phase) {
863
+ var self = this;
864
+ //Meteor._noYieldsAllowed(function () {
865
+ var now = new Date;
866
+ self._phase = phase;
867
+ self._phaseStartTime = now;
868
+ //});
869
+ }
870
+ // Does our oplog tailing code support this cursor? For now, we are being very
871
+ // conservative and allowing only simple queries with simple options.
872
+ // (This is a "static method".)
873
+ static cursorSupported(cursorDescription, matcher) {
874
+ // First, check the options.
875
+ var options = cursorDescription.options;
876
+ // Did the user say no explicitly?
877
+ if (options.disableOplog)
878
+ return false;
879
+ // skip is not supported: to support it we would need to keep track of all
880
+ // "skipped" documents or at least their ids.
881
+ // limit w/o a sort specifier is not supported: current implementation needs a
882
+ // deterministic way to order documents.
883
+ if (options.skip || (options.limit && !options.sort))
884
+ return false;
885
+ // If a fields projection option is given check if it is supported by
886
+ // minimongo (some operators are not supported).
887
+ if (options.projection) {
888
+ try {
889
+ (0, minimongo_common_1._checkSupportedProjection)(options.projection);
890
+ }
891
+ catch (e) {
892
+ if (e.name === "MinimongoError") {
893
+ return false;
894
+ }
895
+ else {
896
+ throw e;
897
+ }
898
+ }
899
+ }
900
+ // We don't allow the following selectors:
901
+ // - $where (not confident that we provide the same JS environment
902
+ // as Mongo, and can yield!)
903
+ // - $near (has "interesting" properties in MongoDB, like the possibility
904
+ // of returning an ID multiple times, though even polling maybe
905
+ // have a bug there)
906
+ // XXX: once we support it, we would need to think more on how we
907
+ // initialize the comparators when we create the driver.
908
+ return !matcher.hasWhere() && !matcher.hasGeoQuery();
909
+ }
910
+ }
911
+ exports.OplogObserveDriver = OplogObserveDriver;
912
// A modifier can be applied in-process only when none of the fields it
// touches live inside an EJSON custom type (i.e. no key contains "EJSON$"):
// LocalCollection._modify would mutate the EJSON encoding rather than the
// actual value in that case.
var modifierCanBeDirectlyApplied = function (modifier) {
    const touchesEjsonField = (fields) =>
        Object.keys(fields).some((field) => /EJSON\$/.test(field));
    return Object.values(modifier).every((fields) => !touchesEjsonField(fields));
};