mongodb-livedata-server 0.0.4 → 0.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93) hide show
  1. package/{livedata_server.ts → dist/livedata_server.d.ts} +1 -1
  2. package/dist/livedata_server.js +3 -1
  3. package/dist/meteor/binary-heap/max_heap.d.ts +31 -0
  4. package/dist/meteor/binary-heap/min_heap.d.ts +6 -0
  5. package/dist/meteor/binary-heap/min_max_heap.d.ts +11 -0
  6. package/dist/meteor/callback-hook/hook.d.ts +11 -0
  7. package/dist/meteor/ddp/crossbar.d.ts +15 -0
  8. package/dist/meteor/ddp/heartbeat.d.ts +19 -0
  9. package/dist/meteor/ddp/livedata_server.d.ts +141 -0
  10. package/dist/meteor/ddp/method-invocation.d.ts +25 -0
  11. package/dist/meteor/ddp/random-stream.d.ts +8 -0
  12. package/dist/meteor/ddp/session-collection-view.d.ts +27 -0
  13. package/dist/meteor/ddp/session-document-view.d.ts +8 -0
  14. package/dist/meteor/ddp/session.d.ts +69 -0
  15. package/dist/meteor/ddp/stream_server.d.ts +21 -0
  16. package/dist/meteor/ddp/subscription.d.ts +89 -0
  17. package/dist/meteor/ddp/utils.d.ts +8 -0
  18. package/dist/meteor/ddp/writefence.d.ts +20 -0
  19. package/dist/meteor/diff-sequence/diff.d.ts +13 -0
  20. package/dist/meteor/ejson/ejson.d.ts +82 -0
  21. package/dist/meteor/ejson/stringify.d.ts +2 -0
  22. package/dist/meteor/ejson/utils.d.ts +12 -0
  23. package/dist/meteor/id-map/id_map.d.ts +16 -0
  24. package/dist/meteor/mongo/caching_change_observer.d.ts +16 -0
  25. package/dist/meteor/mongo/doc_fetcher.d.ts +7 -0
  26. package/dist/meteor/mongo/geojson_utils.d.ts +3 -0
  27. package/dist/meteor/mongo/live_connection.d.ts +27 -0
  28. package/dist/meteor/mongo/live_cursor.d.ts +25 -0
  29. package/dist/meteor/mongo/minimongo_common.d.ts +84 -0
  30. package/dist/meteor/mongo/minimongo_matcher.d.ts +22 -0
  31. package/dist/meteor/mongo/minimongo_sorter.d.ts +16 -0
  32. package/dist/meteor/mongo/observe_driver_utils.d.ts +9 -0
  33. package/dist/meteor/mongo/observe_multiplexer.d.ts +36 -0
  34. package/dist/meteor/mongo/oplog-observe-driver.d.ts +67 -0
  35. package/dist/meteor/mongo/oplog_tailing.d.ts +35 -0
  36. package/dist/meteor/mongo/oplog_v2_converter.d.ts +1 -0
  37. package/dist/meteor/mongo/polling_observe_driver.d.ts +30 -0
  38. package/dist/meteor/mongo/synchronous-cursor.d.ts +17 -0
  39. package/dist/meteor/mongo/synchronous-queue.d.ts +14 -0
  40. package/dist/meteor/ordered-dict/ordered_dict.d.ts +31 -0
  41. package/dist/meteor/random/AbstractRandomGenerator.d.ts +42 -0
  42. package/dist/meteor/random/AleaRandomGenerator.d.ts +13 -0
  43. package/dist/meteor/random/NodeRandomGenerator.d.ts +16 -0
  44. package/dist/meteor/random/createAleaGenerator.d.ts +2 -0
  45. package/dist/meteor/random/createRandom.d.ts +1 -0
  46. package/dist/meteor/random/main.d.ts +1 -0
  47. package/package.json +2 -2
  48. package/meteor/LICENSE +0 -28
  49. package/meteor/binary-heap/max_heap.ts +0 -225
  50. package/meteor/binary-heap/min_heap.ts +0 -15
  51. package/meteor/binary-heap/min_max_heap.ts +0 -53
  52. package/meteor/callback-hook/hook.ts +0 -85
  53. package/meteor/ddp/crossbar.ts +0 -148
  54. package/meteor/ddp/heartbeat.ts +0 -97
  55. package/meteor/ddp/livedata_server.ts +0 -474
  56. package/meteor/ddp/method-invocation.ts +0 -86
  57. package/meteor/ddp/random-stream.ts +0 -102
  58. package/meteor/ddp/session-collection-view.ts +0 -119
  59. package/meteor/ddp/session-document-view.ts +0 -92
  60. package/meteor/ddp/session.ts +0 -708
  61. package/meteor/ddp/stream_server.ts +0 -204
  62. package/meteor/ddp/subscription.ts +0 -392
  63. package/meteor/ddp/utils.ts +0 -119
  64. package/meteor/ddp/writefence.ts +0 -130
  65. package/meteor/diff-sequence/diff.ts +0 -295
  66. package/meteor/ejson/ejson.ts +0 -601
  67. package/meteor/ejson/stringify.ts +0 -122
  68. package/meteor/ejson/utils.ts +0 -38
  69. package/meteor/id-map/id_map.ts +0 -84
  70. package/meteor/mongo/caching_change_observer.ts +0 -120
  71. package/meteor/mongo/doc_fetcher.ts +0 -52
  72. package/meteor/mongo/geojson_utils.ts +0 -42
  73. package/meteor/mongo/live_connection.ts +0 -302
  74. package/meteor/mongo/live_cursor.ts +0 -79
  75. package/meteor/mongo/minimongo_common.ts +0 -2440
  76. package/meteor/mongo/minimongo_matcher.ts +0 -275
  77. package/meteor/mongo/minimongo_sorter.ts +0 -331
  78. package/meteor/mongo/observe_driver_utils.ts +0 -79
  79. package/meteor/mongo/observe_multiplexer.ts +0 -256
  80. package/meteor/mongo/oplog-observe-driver.ts +0 -1049
  81. package/meteor/mongo/oplog_tailing.ts +0 -414
  82. package/meteor/mongo/oplog_v2_converter.ts +0 -124
  83. package/meteor/mongo/polling_observe_driver.ts +0 -247
  84. package/meteor/mongo/synchronous-cursor.ts +0 -293
  85. package/meteor/mongo/synchronous-queue.ts +0 -119
  86. package/meteor/ordered-dict/ordered_dict.ts +0 -229
  87. package/meteor/random/AbstractRandomGenerator.ts +0 -99
  88. package/meteor/random/AleaRandomGenerator.ts +0 -96
  89. package/meteor/random/NodeRandomGenerator.ts +0 -37
  90. package/meteor/random/createAleaGenerator.ts +0 -31
  91. package/meteor/random/createRandom.ts +0 -19
  92. package/meteor/random/main.ts +0 -8
  93. package/tsconfig.json +0 -10
@@ -1,1049 +0,0 @@
1
- import { LiveMongoConnection } from "./live_connection";
2
- import { MinimongoMatcher } from "./minimongo_matcher";
3
- import MinimongoSorter from "./minimongo_sorter";
4
- import { forEachTrigger, listenAll } from "./observe_driver_utils";
5
- import { ObserveMultiplexer } from "./observe_multiplexer";
6
- import { oplogV2V1Converter } from "./oplog_v2_converter";
7
- import { CursorDescription, LiveCursor } from "./live_cursor";
8
- import { _WriteFence } from "../ddp/writefence";
9
- import { DiffSequence } from "../diff-sequence/diff";
10
- import { MinMaxHeap } from "../binary-heap/min_max_heap";
11
- import { MaxHeap } from "../binary-heap/max_heap";
12
- import { _checkSupportedProjection, _compileProjection, _modify } from "./minimongo_common";
13
- import { clone, equals, stringify } from "../ejson/ejson";
14
- import { idForOp, _sleepForMs } from "./oplog_tailing";
15
- import { IdMap } from "../id-map/id_map";
16
- import { SynchronousCursor } from "./synchronous-cursor";
17
-
18
- enum PHASE {
19
- QUERYING = "QUERYING",
20
- FETCHING = "FETCHING",
21
- STEADY = "STEADY"
22
- }
23
-
24
- // Exception thrown by _needToPollQuery which unrolls the stack up to the
25
- // enclosing call to finishIfNeedToPollQuery.
26
- var SwitchedToQuery = function () { };
27
- var finishIfNeedToPollQuery = function (f) {
28
- return function () {
29
- try {
30
- f.apply(this, arguments);
31
- } catch (e) {
32
- if (!(e instanceof SwitchedToQuery))
33
- throw e;
34
- }
35
- };
36
- };
37
-
38
- interface OplogObserveDriverOptions {
39
- cursorDescription: CursorDescription<any>;
40
- mongoHandle: LiveMongoConnection;
41
- ordered: boolean;
42
- multiplexer: ObserveMultiplexer;
43
- matcher: MinimongoMatcher;
44
- sorter: MinimongoSorter;
45
- }
46
-
47
- var currentId = 0;
48
-
49
- // OplogObserveDriver is an alternative to PollingObserveDriver which follows
50
- // the Mongo operation log instead of just re-polling the query. It obeys the
51
- // same simple interface: constructing it starts sending observeChanges
52
- // callbacks (and a ready() invocation) to the ObserveMultiplexer, and you stop
53
- // it by calling the stop() method.
54
- export class OplogObserveDriver {
55
- private _usesOplog: boolean;
56
- private _id: number;
57
- private _cursorDescription: CursorDescription<any>;
58
- private _mongoHandle: LiveMongoConnection;
59
- private _multiplexer: ObserveMultiplexer;
60
- private _safeAppendToBuffer: boolean;
61
- private _stopped: boolean;
62
- private _stopHandles: any[];
63
- private _matcher: MinimongoMatcher;
64
- private _projectionFn: any;
65
- private _sharedProjection: any;
66
- private _sharedProjectionFn: any;
67
- private _limit: any;
68
- private _comparator: (a: any, b: any) => number;
69
- private _sorter: MinimongoSorter;
70
- private _unpublishedBuffer: MinMaxHeap;
71
- private _published: MaxHeap | IdMap;
72
- private _needToFetch: Map<any, any>;
73
- private _currentlyFetching: any;
74
- private _fetchGeneration: number;
75
- private _requeryWhenDoneThisQuery: boolean;
76
- private _writesToCommitWhenWeReachSteady: any[];
77
- private _phase: PHASE;
78
- private _phaseStartTime: Date;
79
-
80
- constructor(options: OplogObserveDriverOptions) {
81
- var self = this;
82
- self._usesOplog = true; // tests look at this
83
-
84
- self._id = currentId;
85
- currentId++;
86
-
87
- self._cursorDescription = options.cursorDescription;
88
- self._mongoHandle = options.mongoHandle;
89
- self._multiplexer = options.multiplexer;
90
-
91
- if (options.ordered) {
92
- throw Error("OplogObserveDriver only supports unordered observeChanges");
93
- }
94
-
95
- var sorter = options.sorter;
96
- // We don't support $near and other geo-queries so it's OK to initialize the
97
- // comparator only once in the constructor.
98
- var comparator = sorter && sorter.getComparator();
99
-
100
- if (options.cursorDescription.options.limit) {
101
- // There are several properties ordered driver implements:
102
- // - _limit is a positive number
103
- // - _comparator is a function-comparator by which the query is ordered
104
- // - _unpublishedBuffer is non-null Min/Max Heap,
105
- // the empty buffer in STEADY phase implies that the
106
- // everything that matches the queries selector fits
107
- // into published set.
108
- // - _published - Max Heap (also implements IdMap methods)
109
-
110
- var heapOptions = { IdMap: Map };
111
- self._limit = self._cursorDescription.options.limit;
112
- self._comparator = comparator;
113
- self._sorter = sorter;
114
- self._unpublishedBuffer = new MinMaxHeap(comparator, heapOptions);
115
- // We need something that can find Max value in addition to IdMap interface
116
- self._published = new MaxHeap(comparator, heapOptions);
117
- } else {
118
- self._limit = 0;
119
- self._comparator = null;
120
- self._sorter = null;
121
- self._unpublishedBuffer = null;
122
- self._published = new IdMap();
123
- }
124
-
125
- // Indicates if it is safe to insert a new document at the end of the buffer
126
- // for this query. i.e. it is known that there are no documents matching the
127
- // selector those are not in published or buffer.
128
- self._safeAppendToBuffer = false;
129
-
130
- self._stopped = false;
131
- self._stopHandles = [];
132
-
133
- self._registerPhaseChange(PHASE.QUERYING);
134
-
135
- self._matcher = options.matcher;
136
- // we are now using projection, not fields in the cursor description even if you pass {fields}
137
- // in the cursor construction
138
- var projection = self._cursorDescription.options.projection || {};
139
- self._projectionFn = _compileProjection(projection);
140
- // Projection function, result of combining important fields for selector and
141
- // existing fields projection
142
- self._sharedProjection = self._matcher.combineIntoProjection(projection);
143
- if (sorter)
144
- self._sharedProjection = sorter.combineIntoProjection(self._sharedProjection);
145
- self._sharedProjectionFn = _compileProjection(self._sharedProjection);
146
-
147
- self._needToFetch = new Map();
148
- self._currentlyFetching = null;
149
- self._fetchGeneration = 0;
150
-
151
- self._requeryWhenDoneThisQuery = false;
152
- self._writesToCommitWhenWeReachSteady = [];
153
-
154
- // If the oplog handle tells us that it skipped some entries (because it got
155
- // behind, say), re-poll.
156
- self._stopHandles.push(self._mongoHandle._oplogHandle.onSkippedEntries(
157
- finishIfNeedToPollQuery(function () {
158
- self._needToPollQuery();
159
- })
160
- ));
161
-
162
- forEachTrigger(self._cursorDescription, function (trigger) {
163
- self._stopHandles.push(/*async*/self._mongoHandle._oplogHandle.onOplogEntry(
164
- trigger, function (notification) {
165
- //Meteor._noYieldsAllowed(finishIfNeedToPollQuery(function () {
166
- var op = notification.op;
167
- if (notification.dropCollection || notification.dropDatabase) {
168
- // Note: this call is not allowed to block on anything (especially
169
- // on waiting for oplog entries to catch up) because that will block
170
- // onOplogEntry!
171
- self._needToPollQuery();
172
- } else {
173
- // All other operators should be handled depending on phase
174
- if (self._phase === PHASE.QUERYING) {
175
- self._handleOplogEntryQuerying(op);
176
- } else {
177
- self._handleOplogEntrySteadyOrFetching(op);
178
- }
179
- }
180
- //}));
181
- }
182
- ));
183
- });
184
-
185
- // XXX ordering w.r.t. everything else?
186
- self._stopHandles.push(listenAll(
187
- self._cursorDescription, function (notification) {
188
- // If we're not in a pre-fire write fence, we don't have to do anything.
189
- var fence = _WriteFence._CurrentWriteFence;
190
- if (!fence || fence.fired)
191
- return;
192
-
193
- if (fence._oplogObserveDrivers) {
194
- fence._oplogObserveDrivers[self._id] = self;
195
- return;
196
- }
197
-
198
- fence._oplogObserveDrivers = {};
199
- fence._oplogObserveDrivers[self._id] = self;
200
-
201
- fence.onBeforeFire(async () => {
202
- var drivers = fence._oplogObserveDrivers;
203
- delete fence._oplogObserveDrivers;
204
-
205
- // This fence cannot fire until we've caught up to "this point" in the
206
- // oplog, and all observers made it back to the steady state.
207
- await self._mongoHandle._oplogHandle.waitUntilCaughtUp();
208
-
209
- for (const driver of Object.values(drivers)) {
210
- if (driver._stopped)
211
- return;
212
-
213
- var write = fence.beginWrite();
214
- if (driver._phase === PHASE.STEADY) {
215
- // Make sure that all of the callbacks have made it through the
216
- // multiplexer and been delivered to ObserveHandles before committing
217
- // writes.
218
- driver._multiplexer.onFlush(function () {
219
- write.committed();
220
- });
221
- } else {
222
- driver._writesToCommitWhenWeReachSteady.push(write);
223
- }
224
- }
225
- });
226
- }
227
- ));
228
-
229
- // When Mongo fails over, we need to repoll the query, in case we processed an
230
- // oplog entry that got rolled back.
231
- self._stopHandles.push(self._mongoHandle._onFailover(finishIfNeedToPollQuery(
232
- function () {
233
- self._needToPollQuery();
234
- })));
235
-
236
- // Give _observeChanges a chance to add the new ObserveHandle to our
237
- // multiplexer, so that the added calls get streamed.
238
- setImmediate(finishIfNeedToPollQuery(function () {
239
- self._runInitialQuery();
240
- }));
241
- }
242
-
243
- _addPublished(id, doc) {
244
- var self = this;
245
- //Meteor._noYieldsAllowed(function () {
246
- var fields = { ...doc };
247
- delete fields._id;
248
- self._published.set(id, self._sharedProjectionFn(doc));
249
- self._multiplexer.added(id, self._projectionFn(fields));
250
-
251
- // After adding this document, the published set might be overflowed
252
- // (exceeding capacity specified by limit). If so, push the maximum
253
- // element to the buffer, we might want to save it in memory to reduce the
254
- // amount of Mongo lookups in the future.
255
- if (self._limit && self._published.size() > self._limit) {
256
- // XXX in theory the size of published is no more than limit+1
257
- if (self._published.size() !== self._limit + 1) {
258
- throw new Error("After adding to published, " +
259
- (self._published.size() - self._limit) +
260
- " documents are overflowing the set");
261
- }
262
-
263
- var overflowingDocId = (self._published as MaxHeap).maxElementId();
264
- var overflowingDoc = self._published.get(overflowingDocId);
265
-
266
- if (equals(overflowingDocId, id)) {
267
- throw new Error("The document just added is overflowing the published set");
268
- }
269
-
270
- self._published.remove(overflowingDocId);
271
- self._multiplexer.removed(overflowingDocId);
272
- self._addBuffered(overflowingDocId, overflowingDoc);
273
- }
274
- //});
275
- }
276
- _removePublished(id) {
277
- var self = this;
278
- //Meteor._noYieldsAllowed(function () {
279
- self._published.remove(id);
280
- self._multiplexer.removed(id);
281
- if (!self._limit || self._published.size() === self._limit)
282
- return;
283
-
284
- if (self._published.size() > self._limit)
285
- throw Error("self._published got too big");
286
-
287
- // OK, we are publishing less than the limit. Maybe we should look in the
288
- // buffer to find the next element past what we were publishing before.
289
-
290
- if (!self._unpublishedBuffer.empty()) {
291
- // There's something in the buffer; move the first thing in it to
292
- // _published.
293
- var newDocId = self._unpublishedBuffer.minElementId();
294
- var newDoc = self._unpublishedBuffer.get(newDocId);
295
- self._removeBuffered(newDocId);
296
- self._addPublished(newDocId, newDoc);
297
- return;
298
- }
299
-
300
- // There's nothing in the buffer. This could mean one of a few things.
301
-
302
- // (a) We could be in the middle of re-running the query (specifically, we
303
- // could be in _publishNewResults). In that case, _unpublishedBuffer is
304
- // empty because we clear it at the beginning of _publishNewResults. In
305
- // this case, our caller already knows the entire answer to the query and
306
- // we don't need to do anything fancy here. Just return.
307
- if (self._phase === PHASE.QUERYING)
308
- return;
309
-
310
- // (b) We're pretty confident that the union of _published and
311
- // _unpublishedBuffer contain all documents that match selector. Because
312
- // _unpublishedBuffer is empty, that means we're confident that _published
313
- // contains all documents that match selector. So we have nothing to do.
314
- if (self._safeAppendToBuffer)
315
- return;
316
-
317
- // (c) Maybe there are other documents out there that should be in our
318
- // buffer. But in that case, when we emptied _unpublishedBuffer in
319
- // _removeBuffered, we should have called _needToPollQuery, which will
320
- // either put something in _unpublishedBuffer or set _safeAppendToBuffer
321
- // (or both), and it will put us in QUERYING for that whole time. So in
322
- // fact, we shouldn't be able to get here.
323
-
324
- throw new Error("Buffer inexplicably empty");
325
- //});
326
- }
327
- _changePublished(id, oldDoc, newDoc) {
328
- var self = this;
329
- //Meteor._noYieldsAllowed(function () {
330
- self._published.set(id, self._sharedProjectionFn(newDoc));
331
- var projectedNew = self._projectionFn(newDoc);
332
- var projectedOld = self._projectionFn(oldDoc);
333
- var changed = DiffSequence.makeChangedFields(projectedNew, projectedOld);
334
- if (Object.keys(changed).length === 0)
335
- self._multiplexer.changed(id, changed);
336
- //});
337
- }
338
- _addBuffered(id, doc) {
339
- var self = this;
340
- //Meteor._noYieldsAllowed(function () {
341
- self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc));
342
-
343
- // If something is overflowing the buffer, we just remove it from cache
344
- if (self._unpublishedBuffer.size() > self._limit) {
345
- var maxBufferedId = self._unpublishedBuffer.maxElementId();
346
-
347
- self._unpublishedBuffer.remove(maxBufferedId);
348
-
349
- // Since something matching is removed from cache (both published set and
350
- // buffer), set flag to false
351
- self._safeAppendToBuffer = false;
352
- }
353
- //});
354
- }
355
- // Is called either to remove the doc completely from matching set or to move
356
- // it to the published set later.
357
- _removeBuffered(id) {
358
- var self = this;
359
- //Meteor._noYieldsAllowed(function () {
360
- self._unpublishedBuffer.remove(id);
361
- // To keep the contract "buffer is never empty in STEADY phase unless the
362
- // everything matching fits into published" true, we poll everything as
363
- // soon as we see the buffer becoming empty.
364
- if (!self._unpublishedBuffer.size() && !self._safeAppendToBuffer)
365
- self._needToPollQuery();
366
- //});
367
- }
368
- // Called when a document has joined the "Matching" results set.
369
- // Takes responsibility of keeping _unpublishedBuffer in sync with _published
370
- // and the effect of limit enforced.
371
- _addMatching(doc) {
372
- var self = this;
373
- //Meteor._noYieldsAllowed(function () {
374
- var id = doc._id;
375
- if (self._published.has(id))
376
- throw Error("tried to add something already published " + id);
377
- if (self._limit && self._unpublishedBuffer.has(id))
378
- throw Error("tried to add something already existed in buffer " + id);
379
-
380
- var limit = self._limit;
381
- var comparator = self._comparator;
382
- var maxPublished = (limit && self._published.size() > 0)
383
- ? self._published.get((self._published as MaxHeap).maxElementId()) // published is MaxHeap because limit is defined
384
- : null;
385
- var maxBuffered = (limit && self._unpublishedBuffer.size() > 0)
386
- ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId())
387
- : null;
388
- // The query is unlimited or didn't publish enough documents yet or the
389
- // new document would fit into published set pushing the maximum element
390
- // out, then we need to publish the doc.
391
- var toPublish = !limit || self._published.size() < limit ||
392
- comparator(doc, maxPublished) < 0;
393
-
394
- // Otherwise we might need to buffer it (only in case of limited query).
395
- // Buffering is allowed if the buffer is not filled up yet and all
396
- // matching docs are either in the published set or in the buffer.
397
- var canAppendToBuffer = !toPublish && self._safeAppendToBuffer &&
398
- self._unpublishedBuffer.size() < limit;
399
-
400
- // Or if it is small enough to be safely inserted to the middle or the
401
- // beginning of the buffer.
402
- var canInsertIntoBuffer = !toPublish && maxBuffered &&
403
- comparator(doc, maxBuffered) <= 0;
404
-
405
- var toBuffer = canAppendToBuffer || canInsertIntoBuffer;
406
-
407
- if (toPublish) {
408
- self._addPublished(id, doc);
409
- } else if (toBuffer) {
410
- self._addBuffered(id, doc);
411
- } else {
412
- // dropping it and not saving to the cache
413
- self._safeAppendToBuffer = false;
414
- }
415
- //});
416
- }
417
- // Called when a document leaves the "Matching" results set.
418
- // Takes responsibility of keeping _unpublishedBuffer in sync with _published
419
- // and the effect of limit enforced.
420
- _removeMatching(id) {
421
- var self = this;
422
- //Meteor._noYieldsAllowed(function () {
423
- if (!self._published.has(id) && !self._limit)
424
- throw Error("tried to remove something matching but not cached " + id);
425
-
426
- if (self._published.has(id)) {
427
- self._removePublished(id);
428
- } else if (self._unpublishedBuffer.has(id)) {
429
- self._removeBuffered(id);
430
- }
431
- //});
432
- }
433
- _handleDoc(id, newDoc) {
434
- var self = this;
435
- //Meteor._noYieldsAllowed(function () {
436
- var matchesNow = newDoc && self._matcher.documentMatches(newDoc).result;
437
-
438
- var publishedBefore = self._published.has(id);
439
- var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);
440
- var cachedBefore = publishedBefore || bufferedBefore;
441
-
442
- if (matchesNow && !cachedBefore) {
443
- self._addMatching(newDoc);
444
- } else if (cachedBefore && !matchesNow) {
445
- self._removeMatching(id);
446
- } else if (cachedBefore && matchesNow) {
447
- var oldDoc = self._published.get(id);
448
- var comparator = self._comparator;
449
- var minBuffered = self._limit && self._unpublishedBuffer.size() &&
450
- self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId());
451
- var maxBuffered;
452
-
453
- if (publishedBefore) {
454
- // Unlimited case where the document stays in published once it
455
- // matches or the case when we don't have enough matching docs to
456
- // publish or the changed but matching doc will stay in published
457
- // anyways.
458
- //
459
- // XXX: We rely on the emptiness of buffer. Be sure to maintain the
460
- // fact that buffer can't be empty if there are matching documents not
461
- // published. Notably, we don't want to schedule repoll and continue
462
- // relying on this property.
463
- var staysInPublished = !self._limit ||
464
- self._unpublishedBuffer.size() === 0 ||
465
- comparator(newDoc, minBuffered) <= 0;
466
-
467
- if (staysInPublished) {
468
- self._changePublished(id, oldDoc, newDoc);
469
- } else {
470
- // after the change doc doesn't stay in the published, remove it
471
- self._removePublished(id);
472
- // but it can move into buffered now, check it
473
- maxBuffered = self._unpublishedBuffer.get(
474
- self._unpublishedBuffer.maxElementId());
475
-
476
- var toBuffer = self._safeAppendToBuffer ||
477
- (maxBuffered && comparator(newDoc, maxBuffered) <= 0);
478
-
479
- if (toBuffer) {
480
- self._addBuffered(id, newDoc);
481
- } else {
482
- // Throw away from both published set and buffer
483
- self._safeAppendToBuffer = false;
484
- }
485
- }
486
- } else if (bufferedBefore) {
487
- oldDoc = self._unpublishedBuffer.get(id);
488
- // remove the old version manually instead of using _removeBuffered so
489
- // we don't trigger the querying immediately. if we end this block
490
- // with the buffer empty, we will need to trigger the query poll
491
- // manually too.
492
- self._unpublishedBuffer.remove(id);
493
-
494
- // published is MaxHeap because bufferedBefore is only set when limit is defined
495
- var maxPublished = self._published.get((self._published as MaxHeap).maxElementId());
496
- maxBuffered = self._unpublishedBuffer.size() && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
497
-
498
- // the buffered doc was updated, it could move to published
499
- var toPublish = comparator(newDoc, maxPublished) < 0;
500
-
501
- // or stays in buffer even after the change
502
- var staysInBuffer = (!toPublish && self._safeAppendToBuffer) ||
503
- (!toPublish && maxBuffered &&
504
- comparator(newDoc, maxBuffered) <= 0);
505
-
506
- if (toPublish) {
507
- self._addPublished(id, newDoc);
508
- } else if (staysInBuffer) {
509
- // stays in buffer but changes
510
- self._unpublishedBuffer.set(id, newDoc);
511
- } else {
512
- // Throw away from both published set and buffer
513
- self._safeAppendToBuffer = false;
514
- // Normally this check would have been done in _removeBuffered but
515
- // we didn't use it, so we need to do it ourself now.
516
- if (!self._unpublishedBuffer.size()) {
517
- self._needToPollQuery();
518
- }
519
- }
520
- } else {
521
- throw new Error("cachedBefore implies either of publishedBefore or bufferedBefore is true.");
522
- }
523
- }
524
- //});
525
- }
526
- _fetchModifiedDocuments() {
527
- var self = this;
528
- //Meteor._noYieldsAllowed(function () {
529
- self._registerPhaseChange(PHASE.FETCHING);
530
- // Defer, because nothing called from the oplog entry handler may yield,
531
- // but fetch() yields.
532
- setImmediate(finishIfNeedToPollQuery(async () => {
533
- while (!self._stopped && self._needToFetch.size > 0) {
534
- if (self._phase === PHASE.QUERYING) {
535
- // While fetching, we decided to go into QUERYING mode, and then we
536
- // saw another oplog entry, so _needToFetch is not empty. But we
537
- // shouldn't fetch these documents until AFTER the query is done.
538
- break;
539
- }
540
-
541
- // Being in steady phase here would be surprising.
542
- if (self._phase !== PHASE.FETCHING)
543
- throw new Error("phase in fetchModifiedDocuments: " + self._phase);
544
-
545
- self._currentlyFetching = self._needToFetch;
546
- var thisGeneration = ++self._fetchGeneration;
547
- self._needToFetch = new Map();
548
- var waiting = 0;
549
- var fut = { promise: undefined, resolve: undefined };
550
- fut.promise = new Promise(r => fut.resolve = r);
551
- // This loop is safe, because _currentlyFetching will not be updated
552
- // during this loop (in fact, it is never mutated).
553
- self._currentlyFetching.forEach((op, id) => {
554
- waiting++;
555
- self._mongoHandle._docFetcher.fetch(
556
- self._cursorDescription.collectionName, id, op,
557
- finishIfNeedToPollQuery((err, doc) => {
558
- try {
559
- if (err) {
560
- console.log("Got exception while fetching documents", err);
561
- // If we get an error from the fetcher (eg, trouble
562
- // connecting to Mongo), let's just abandon the fetch phase
563
- // altogether and fall back to polling. It's not like we're
564
- // getting live updates anyway.
565
- if (self._phase !== PHASE.QUERYING) {
566
- self._needToPollQuery();
567
- }
568
- } else if (!self._stopped && self._phase === PHASE.FETCHING
569
- && self._fetchGeneration === thisGeneration) {
570
- // We re-check the generation in case we've had an explicit
571
- // _pollQuery call (eg, in another fiber) which should
572
- // effectively cancel this round of fetches. (_pollQuery
573
- // increments the generation.)
574
- self._handleDoc(id, doc);
575
- }
576
- } finally {
577
- waiting--;
578
- // Because fetch() never calls its callback synchronously,
579
- // this is safe (ie, we won't call fut.return() before the
580
- // forEach is done).
581
- if (waiting === 0)
582
- fut.resolve();
583
- }
584
- }));
585
- });
586
- await fut.promise();
587
- // Exit now if we've had a _pollQuery call (here or in another fiber).
588
- if ((self._phase as PHASE) === PHASE.QUERYING)
589
- return;
590
- self._currentlyFetching = null;
591
- }
592
- // We're done fetching, so we can be steady, unless we've had a
593
- // _pollQuery call (here or in another fiber).
594
- if (self._phase !== PHASE.QUERYING)
595
- self._beSteady();
596
- }));
597
- //});
598
- }
599
-
600
- _beSteady() {
601
- var self = this;
602
- //Meteor._noYieldsAllowed(function () {
603
- self._registerPhaseChange(PHASE.STEADY);
604
- var writes = self._writesToCommitWhenWeReachSteady;
605
- self._writesToCommitWhenWeReachSteady = [];
606
- self._multiplexer.onFlush(function () {
607
- for (const w of writes) {
608
- w.committed();
609
- }
610
- });
611
- //});
612
- }
613
- _handleOplogEntryQuerying(op) {
614
- var self = this;
615
- //Meteor._noYieldsAllowed(function () {
616
- self._needToFetch.set(idForOp(op), op);
617
- //});
618
- }
619
  // Oplog-entry handler for the STEADY and FETCHING phases. Applies inserts,
  // deletes, and (where possible) updates directly to the published
  // set/buffer; falls back to scheduling a re-fetch when the modifier's
  // effect on the document can't be determined locally.
  _handleOplogEntrySteadyOrFetching(op) {
    var self = this;
    //Meteor._noYieldsAllowed(function () {
    var id = idForOp(op);
    // If we're already fetching this one, or about to, we can't optimize;
    // make sure that we fetch it again if necessary.
    if (self._phase === PHASE.FETCHING &&
      ((self._currentlyFetching && self._currentlyFetching.has(id)) ||
        self._needToFetch.has(id))) {
      self._needToFetch.set(id, op);
      return;
    }

    if (op.op === 'd') {
      // Delete: only relevant if the doc is currently published or buffered.
      if (self._published.has(id) ||
        (self._limit && self._unpublishedBuffer.has(id)))
        self._removeMatching(id);
    } else if (op.op === 'i') {
      // Insert: the id must be brand new to both published set and buffer.
      if (self._published.has(id))
        throw new Error("insert found for already-existing ID in published");
      if (self._unpublishedBuffer && self._unpublishedBuffer.has(id))
        throw new Error("insert found for already-existing ID in buffer");

      // XXX what if selector yields? for now it can't but later it could
      // have $where
      if (self._matcher.documentMatches(op.o).result)
        self._addMatching(op.o);
    } else if (op.op === 'u') {
      // we are mapping the new oplog format on mongo 5
      // to what we know better, $set
      op.o = oplogV2V1Converter(op.o)
      // Is this a modifier ($set/$unset, which may require us to poll the
      // database to figure out if the whole document matches the selector) or
      // a replacement (in which case we can just directly re-evaluate the
      // selector)?
      // oplog format has changed on mongodb 5, we have to support both now
      // diff is the format in Mongo 5+ (oplog v2)
      var isReplace = !op.o.hasOwnProperty('$set') && !op.o.hasOwnProperty('diff') && !op.o.hasOwnProperty('$unset');
      // If this modifier modifies something inside an EJSON custom type (ie,
      // anything with EJSON$), then we can't try to use
      // LocalCollection._modify, since that just mutates the EJSON encoding,
      // not the actual object.
      var canDirectlyModifyDoc =
        !isReplace && modifierCanBeDirectlyApplied(op.o);

      var publishedBefore = self._published.has(id);
      var bufferedBefore = self._limit && self._unpublishedBuffer.has(id);

      if (isReplace) {
        // Full-document replacement: re-evaluate directly against the new doc.
        self._handleDoc(id, Object.assign({ _id: id }, op.o));
      } else if ((publishedBefore || bufferedBefore) &&
        canDirectlyModifyDoc) {
        // Oh great, we actually know what the document is, so we can apply
        // this directly.
        var newDoc = self._published.has(id)
          ? self._published.get(id) : self._unpublishedBuffer.get(id);
        newDoc = clone(newDoc);

        newDoc._id = id;
        try {
          _modify(newDoc, op.o);
        } catch (e) {
          if (e.name !== "MinimongoError")
            throw e;
          // We didn't understand the modifier. Re-fetch.
          self._needToFetch.set(id, op);
          if (self._phase === PHASE.STEADY) {
            self._fetchModifiedDocuments();
          }
          return;
        }
        self._handleDoc(id, self._sharedProjectionFn(newDoc));
      } else if (!canDirectlyModifyDoc ||
        self._matcher.canBecomeTrueByModifier(op.o) ||
        (self._sorter && self._sorter.affectedByModifier(op.o))) {
        // The doc isn't in hand (or the modifier can't be applied locally) and
        // the update might change whether/where it matches: re-fetch it.
        self._needToFetch.set(id, op);
        if (self._phase === PHASE.STEADY)
          self._fetchModifiedDocuments();
      }
    } else {
      throw Error("XXX SURPRISING OPERATION: " + op);
    }
    //});
  }
703
- // Yields!
704
- _runInitialQuery() {
705
- var self = this;
706
- if (self._stopped)
707
- throw new Error("oplog stopped surprisingly early");
708
-
709
- self._runQuery({ initial: true }); // yields
710
-
711
- if (self._stopped)
712
- return; // can happen on queryError
713
-
714
- // Allow observeChanges calls to return. (After this, it's possible for
715
- // stop() to be called.)
716
- self._multiplexer.ready();
717
-
718
- self._doneQuerying(); // yields
719
- }
720
-
721
- // In various circumstances, we may just want to stop processing the oplog and
722
- // re-run the initial query, just as if we were a PollingObserveDriver.
723
- //
724
- // This function may not block, because it is called from an oplog entry
725
- // handler.
726
- //
727
- // XXX We should call this when we detect that we've been in FETCHING for "too
728
- // long".
729
- //
730
- // XXX We should call this when we detect Mongo failover (since that might
731
- // mean that some of the oplog entries we have processed have been rolled
732
- // back). The Node Mongo driver is in the middle of a bunch of huge
733
- // refactorings, including the way that it notifies you when primary
734
- // changes. Will put off implementing this until driver 1.4 is out.
735
- _pollQuery() {
736
- var self = this;
737
- //Meteor._noYieldsAllowed(function () {
738
- if (self._stopped)
739
- return;
740
-
741
- // Yay, we get to forget about all the things we thought we had to fetch.
742
- self._needToFetch = new Map();
743
- self._currentlyFetching = null;
744
- ++self._fetchGeneration; // ignore any in-flight fetches
745
- self._registerPhaseChange(PHASE.QUERYING);
746
-
747
- // Defer so that we don't yield. We don't need finishIfNeedToPollQuery
748
- // here because SwitchedToQuery is not thrown in QUERYING mode.
749
- setImmediate(async () => {
750
- await self._runQuery();
751
- await self._doneQuerying();
752
- });
753
- //});
754
- }
755
-
756
  // Yields!
  //
  // Re-runs the observe's query against Mongo and replaces the published
  // set/buffer with the fresh results. Retries indefinitely on transient
  // errors (e.g. failover); halts the observe on a server-side query error
  // during the initial run.
  async _runQuery(options?) {
    var self = this;
    options = options || {};
    var newResults: IdMap, newBuffer: IdMap;

    // This while loop is just to retry failures.
    while (true) {
      // If we've been stopped, we don't have to run anything any more.
      if (self._stopped)
        return;

      newResults = new IdMap();
      newBuffer = new IdMap();

      // Query 2x documents as the half excluded from the original query will go
      // into unpublished buffer to reduce additional Mongo lookups in cases
      // when documents are removed from the published set and need a
      // replacement.
      // XXX needs more thought on non-zero skip
      // XXX 2 is a "magic number" meaning there is an extra chunk of docs for
      // buffer if such is needed.
      var cursor = self._cursorForQuery({ limit: self._limit * 2 });
      try {
        await cursor.forEach((doc: { _id: string }, i: number) => { // yields
          // First _limit docs go to the published set, the rest to the buffer.
          if (!self._limit || i < self._limit) {
            newResults.set(doc._id, doc);
          } else {
            newBuffer.set(doc._id, doc);
          }
        });
        break;
      } catch (e) {
        if (options.initial && typeof (e.code) === 'number') {
          // This is an error document sent to us by mongod, not a connection
          // error generated by the client. And we've never seen this query work
          // successfully. Probably it's a bad selector or something, so we
          // should NOT retry. Instead, we should halt the observe (which ends
          // up calling `stop` on us).
          self._multiplexer.queryError(e);
          return;
        }

        // During failover (eg) if we get an exception we should log and retry
        // instead of crashing.
        console.log("Got exception while polling query during failover", e);
        await _sleepForMs(100);
      }
    }

    // Stop may have been called while we were awaiting the cursor.
    if (self._stopped)
      return;

    self._publishNewResults(newResults, newBuffer);
  }
811
-
812
- // Transitions to QUERYING and runs another query, or (if already in QUERYING)
813
- // ensures that we will query again later.
814
- //
815
- // This function may not block, because it is called from an oplog entry
816
- // handler. However, if we were not already in the QUERYING phase, it throws
817
- // an exception that is caught by the closest surrounding
818
- // finishIfNeedToPollQuery call; this ensures that we don't continue running
819
- // close that was designed for another phase inside PHASE.QUERYING.
820
- //
821
- // (It's also necessary whenever logic in this file yields to check that other
822
- // phases haven't put us into QUERYING mode, though; eg,
823
- // _fetchModifiedDocuments does this.)
824
- _needToPollQuery() {
825
- var self = this;
826
- //Meteor._noYieldsAllowed(function () {
827
- if (self._stopped)
828
- return;
829
-
830
- // If we're not already in the middle of a query, we can query now
831
- // (possibly pausing FETCHING).
832
- if (self._phase !== PHASE.QUERYING) {
833
- self._pollQuery();
834
- throw new SwitchedToQuery;
835
- }
836
-
837
- // We're currently in QUERYING. Set a flag to ensure that we run another
838
- // query when we're done.
839
- self._requeryWhenDoneThisQuery = true;
840
- //});
841
- }
842
-
843
- // Yields!
844
- async _doneQuerying() {
845
- var self = this;
846
-
847
- if (self._stopped)
848
- return;
849
-
850
- await self._mongoHandle._oplogHandle.waitUntilCaughtUp(); // yields
851
-
852
- if (self._stopped)
853
- return;
854
- if (self._phase !== PHASE.QUERYING)
855
- throw Error("Phase unexpectedly " + self._phase);
856
-
857
- //Meteor._noYieldsAllowed(function () {
858
- if (self._requeryWhenDoneThisQuery) {
859
- self._requeryWhenDoneThisQuery = false;
860
- self._pollQuery();
861
- } else if (self._needToFetch.size === 0) {
862
- self._beSteady();
863
- } else {
864
- self._fetchModifiedDocuments();
865
- }
866
- //});
867
- }
868
-
869
  // Builds the cursor used to (re-)run this observe's underlying query
  // against Mongo. optionsOverwrite lets callers adjust options, e.g. the
  // doubled limit used by _runQuery.
  _cursorForQuery(optionsOverwrite: { limit: number }) {
    //return Meteor._noYieldsAllowed(function () {
    // The query we run is almost the same as the cursor we are observing,
    // with a few changes. We need to read all the fields that are relevant to
    // the selector, not just the fields we are going to publish (that's the
    // "shared" projection). And we don't want to apply any transform in the
    // cursor, because observeChanges shouldn't use the transform.
    var options = { ...this._cursorDescription.options, ...optionsOverwrite };

    options.projection = this._sharedProjection;
    delete options.transform;
    // We are NOT deep cloning fields or selector here, which should be OK.
    const description = new CursorDescription(
      this._cursorDescription.collectionName,
      this._cursorDescription.selector,
      options);
    const dbCursor = this._mongoHandle.db.collection(description.collectionName).find(description.selector, options);
    // NOTE(review): `useTransform: true` looks inconsistent with the comment
    // above ("we don't want to apply any transform") and with the
    // `delete options.transform`; also the original (un-stripped)
    // _cursorDescription is passed here rather than `description`. Verify
    // against SynchronousCursor before relying on transform behavior.
    return new SynchronousCursor(dbCursor, this._cursorDescription, { useTransform: true });
    //});
  }
889
-
890
-
891
  // Replace self._published with newResults (both are IdMaps), invoking observe
  // callbacks on the multiplexer.
  // Replace self._unpublishedBuffer with newBuffer.
  //
  // XXX This is very similar to LocalCollection._diffQueryUnorderedChanges. We
  // should really: (a) Unify IdMap and OrderedDict into Unordered/OrderedDict
  // (b) Rewrite diff.js to use these classes instead of arrays and objects.
  _publishNewResults(newResults: IdMap, newBuffer: IdMap) {
    var self = this;
    //Meteor._noYieldsAllowed(function () {

    // If the query is limited and there is a buffer, shut down so it doesn't
    // stay in a way.
    if (self._limit) {
      self._unpublishedBuffer.clear();
    }

    // First remove anything that's gone. Be careful not to modify
    // self._published while iterating over it.
    var idsToRemove = [];
    self._published.forEach(function (doc, id) {
      if (!newResults.has(id))
        idsToRemove.push(id);
    });
    for (const id of idsToRemove) {
      self._removePublished(id);
    }

    // Now do adds and changes.
    // If self has a buffer and limit, the new fetched result will be
    // limited correctly as the query has sort specifier.
    newResults.forEach(function (doc, id) {
      self._handleDoc(id, doc);
    });

    // Sanity-check that everything we tried to put into _published ended up
    // there.
    // XXX if this is slow, remove it later
    if (self._published.size() !== newResults.size()) {
      console.error('The Mongo server and the Meteor query disagree on how ' +
        'many documents match your query. Cursor description: ',
        self._cursorDescription);
      throw Error(
        "The Mongo server and the Meteor query disagree on how " +
        "many documents match your query. Maybe it is hitting a Mongo " +
        "edge case? The query is: " +
        stringify(self._cursorDescription.selector));
    }
    self._published.forEach(function (doc, id) {
      if (!newResults.has(id))
        throw Error("_published has a doc that newResults doesn't; " + id);
    });

    // Finally, replace the buffer
    newBuffer.forEach(function (doc, id) {
      self._addBuffered(id, doc);
    });

    // The buffer is "safe to append to" only when it wasn't truncated by the
    // limit, i.e. it holds everything beyond the published window.
    self._safeAppendToBuffer = newBuffer.size() < self._limit;
    //});
  }
952
-
953
- // This stop function is invoked from the onStop of the ObserveMultiplexer, so
954
- // it shouldn't actually be possible to call it until the multiplexer is
955
- // ready.
956
- //
957
- // It's important to check self._stopped after every call in this file that
958
- // can yield!
959
- stop() {
960
- var self = this;
961
- if (self._stopped)
962
- return;
963
- self._stopped = true;
964
- for (const handle of self._stopHandles) {
965
- handle.stop();
966
- }
967
-
968
- // Note: we *don't* use multiplexer.onFlush here because this stop
969
- // callback is actually invoked by the multiplexer itself when it has
970
- // determined that there are no handles left. So nothing is actually going
971
- // to get flushed (and it's probably not valid to call methods on the
972
- // dying multiplexer).
973
- for (const w of self._writesToCommitWhenWeReachSteady) {
974
- w.committed(); // maybe yields?
975
- }
976
- self._writesToCommitWhenWeReachSteady = null;
977
-
978
- // Proactively drop references to potentially big things.
979
- self._published = null;
980
- self._unpublishedBuffer = null;
981
- self._needToFetch = null;
982
- self._currentlyFetching = null;
983
- // not used?
984
- //self._oplogEntryHandle = null;
985
- //self._listenersHandle = null;
986
-
987
- }
988
-
989
- _registerPhaseChange(phase) {
990
- var self = this;
991
- //Meteor._noYieldsAllowed(function () {
992
- var now = new Date;
993
-
994
- self._phase = phase;
995
- self._phaseStartTime = now;
996
- //});
997
- }
998
-
999
-
1000
- // Does our oplog tailing code support this cursor? For now, we are being very
1001
- // conservative and allowing only simple queries with simple options.
1002
- // (This is a "static method".)
1003
- public static cursorSupported(cursorDescription: CursorDescription<any>, matcher: MinimongoMatcher) {
1004
- // First, check the options.
1005
- var options = cursorDescription.options;
1006
-
1007
- // Did the user say no explicitly?
1008
- if (options.disableOplog)
1009
- return false;
1010
-
1011
- // skip is not supported: to support it we would need to keep track of all
1012
- // "skipped" documents or at least their ids.
1013
- // limit w/o a sort specifier is not supported: current implementation needs a
1014
- // deterministic way to order documents.
1015
- if (options.skip || (options.limit && !options.sort)) return false;
1016
-
1017
- // If a fields projection option is given check if it is supported by
1018
- // minimongo (some operators are not supported).
1019
- if (options.projection) {
1020
- try {
1021
- _checkSupportedProjection(options.projection);
1022
- } catch (e) {
1023
- if (e.name === "MinimongoError") {
1024
- return false;
1025
- } else {
1026
- throw e;
1027
- }
1028
- }
1029
- }
1030
-
1031
- // We don't allow the following selectors:
1032
- // - $where (not confident that we provide the same JS environment
1033
- // as Mongo, and can yield!)
1034
- // - $near (has "interesting" properties in MongoDB, like the possibility
1035
- // of returning an ID multiple times, though even polling maybe
1036
- // have a bug there)
1037
- // XXX: once we support it, we would need to think more on how we
1038
- // initialize the comparators when we create the driver.
1039
- return !matcher.hasWhere() && !matcher.hasGeoQuery();
1040
- }
1041
- }
1042
-
1043
- var modifierCanBeDirectlyApplied = function (modifier) {
1044
- return Object.entries(modifier).every(([operation, fields]) => {
1045
- return Object.entries(fields).every(([field, value]) => {
1046
- return !/EJSON\$/.test(field);
1047
- });
1048
- });
1049
- };