mongodb-livedata-server 0.0.14 → 0.1.0
This diff compares the publicly released contents of the two package versions as they appear in their public registry. It is provided for informational purposes only and reflects the packages exactly as published.
- package/README.md +70 -69
- package/dist/meteor/binary-heap/max_heap.d.ts +2 -2
- package/dist/meteor/binary-heap/max_heap.js +7 -7
- package/dist/meteor/binary-heap/min_max_heap.d.ts +1 -1
- package/dist/meteor/binary-heap/min_max_heap.js +3 -3
- package/dist/meteor/ddp/livedata_server.d.ts +3 -2
- package/dist/meteor/ddp/livedata_server.js +1 -0
- package/dist/meteor/ddp/session-collection-view.js +3 -4
- package/dist/meteor/ddp/session.d.ts +6 -2
- package/dist/meteor/ddp/session.js +20 -5
- package/dist/meteor/ddp/stream_server.js +1 -1
- package/dist/meteor/ddp/subscription.d.ts +2 -0
- package/dist/meteor/ddp/subscription.js +13 -0
- package/dist/meteor/diff-sequence/diff.d.ts +2 -3
- package/dist/meteor/mongo/caching_change_observer.d.ts +2 -3
- package/dist/meteor/mongo/caching_change_observer.js +6 -38
- package/dist/meteor/mongo/live_cursor.js +3 -0
- package/dist/meteor/mongo/observe_multiplexer.d.ts +3 -1
- package/dist/meteor/mongo/observe_multiplexer.js +14 -38
- package/dist/meteor/mongo/oplog-observe-driver.d.ts +4 -3
- package/dist/meteor/mongo/oplog-observe-driver.js +25 -26
- package/dist/meteor/mongo/polling_observe_driver.js +30 -4
- package/dist/meteor/ordered-dict/ordered_dict.d.ts +2 -2
- package/dist/meteor/ordered-dict/ordered_dict.js +2 -2
- package/git-filter-repo.py +4005 -0
- package/package.json +1 -1
package/dist/meteor/mongo/observe_multiplexer.js

@@ -1,15 +1,4 @@
 "use strict";
-var __rest = (this && this.__rest) || function (s, e) {
-    var t = {};
-    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
-        t[p] = s[p];
-    if (s != null && typeof Object.getOwnPropertySymbols === "function")
-        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
-            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
-                t[p[i]] = s[p[i]];
-        }
-    return t;
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ObserveHandle = exports.ObserveMultiplexer = void 0;
 const ejson_1 = require("../ejson/ejson");

@@ -51,9 +40,8 @@ class ObserveMultiplexer {
         ++self._addHandleTasksScheduledButNotPerformed;
         await self._queue.runTask(async () => {
             self._handles[handle._id] = handle;
-
-
-            self._sendAdds(handle);
+            if (this._ready() && this._cache.docs.size > 0)
+                handle._initialAdds(this._cache.docs);
             --self._addHandleTasksScheduledButNotPerformed;
         });
         // *outside* the task, since otherwise we'd deadlock

@@ -97,6 +85,13 @@ class ObserveMultiplexer {
         this._queue.queueTask(async () => {
             if (this._ready())
                 throw Error("can't make ObserveMultiplex ready twice!");
+            if (this._cache.docs.size > 0) {
+                for (const handleId of Object.keys(this._handles)) {
+                    var handle = this._handles && this._handles[handleId];
+                    if (handle)
+                        handle._initialAdds(this._cache.docs);
+                }
+            }
             this._readyFuture.resolve();
             this._readyFuture.isResolved = true;
         });

@@ -130,9 +125,9 @@ class ObserveMultiplexer {
     callbackNames() {
         var self = this;
         if (self._ordered)
-            return ["addedBefore", "changed", "movedBefore", "removed"];
+            return ["initialAdds", "addedBefore", "changed", "movedBefore", "removed"];
         else
-            return ["added", "changed", "removed"];
+            return ["initialAdds", "added", "changed", "removed"];
     }
     _ready() {
         return this._readyFuture.isResolved;

@@ -151,6 +146,9 @@ class ObserveMultiplexer {
             (callbackName !== 'added' && callbackName !== 'addedBefore')) {
             throw new Error("Got " + callbackName + " during initial adds");
         }
+        // don't actually send anything to the handles until initial adds are cached
+        if (!self._ready())
+            return;
         // Now multiplex the callbacks out to all observe handles. It's OK if
         // these calls yield; since we're inside a task, no other use of our queue
         // can continue until these are done. (But we do have to be careful to not

@@ -166,28 +164,6 @@ class ObserveMultiplexer {
             }
         });
     }
-    // Sends initial adds to a handle. It should only be called from within a task
-    // (the task that is processing the addHandleAndSendInitialAdds call). It
-    // synchronously invokes the handle's added or addedBefore; there's no need to
-    // flush the queue afterwards to ensure that the callbacks get out.
-    _sendAdds(handle) {
-        var self = this;
-        //if (self._queue.safeToRunTask())
-        // throw Error("_sendAdds may only be called from within a task!");
-        var add = self._ordered ? handle._addedBefore : handle._added;
-        if (!add)
-            return;
-        // note: docs may be an _IdMap or an OrderedDict
-        self._cache.docs.forEach((doc, id) => {
-            if (!self._handles.hasOwnProperty(handle._id))
-                throw Error("handle got removed before sending initial adds!");
-            const _a = handle.nonMutatingCallbacks ? doc : (0, ejson_1.clone)(doc), { _id } = _a, fields = __rest(_a, ["_id"]);
-            if (self._ordered)
-                add(id, fields, null); // we're going in order, so add at end
-            else
-                add(id, fields);
-        });
-    }
 }
 exports.ObserveMultiplexer = ObserveMultiplexer;
 let nextObserveHandleId = 1;
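Net effect of these hunks: instead of replaying cached documents one at a time through the removed _sendAdds helper, the multiplexer now hands the whole document cache to each handle through a new initialAdds callback (note the extra entry in callbackNames()), and only once the multiplexer is ready. A minimal TypeScript sketch of the two delivery styles, using hypothetical added/initialAdds callbacks rather than the package's actual ObserveHandle API:

```typescript
// Sketch only: `docs`, `added`, and `initialAdds` are illustrative stand-ins,
// not the package's real ObserveHandle interface.
type Doc = { _id: string } & Record<string, unknown>;

const docs = new Map<string, Doc>([
  ["a", { _id: "a", x: 1 }],
  ["b", { _id: "b", x: 2 }],
]);

// Old style (the removed _sendAdds): one callback per cached document.
function sendAddsOneByOne(added: (id: string, fields: Record<string, unknown>) => void) {
  docs.forEach((doc, id) => {
    const { _id, ...fields } = doc; // strip _id, as the removed __rest helper did
    added(id, fields);
  });
}

// New style: the whole cache is delivered in a single initialAdds call,
// and only after the multiplexer has become ready.
function sendInitialAdds(initialAdds: (all: Map<string, Doc>) => void, ready: boolean) {
  if (ready && docs.size > 0) initialAdds(docs);
}

sendAddsOneByOne((id, fields) => console.log("added", id, fields));
sendInitialAdds((all) => console.log("initialAdds with", all.size, "docs"), true);
```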
package/dist/meteor/mongo/oplog-observe-driver.d.ts

@@ -3,7 +3,6 @@ import { MinimongoMatcher } from "./minimongo_matcher";
 import MinimongoSorter from "./minimongo_sorter";
 import { ObserveMultiplexer } from "./observe_multiplexer";
 import { CursorDescription } from "./live_cursor";
-import { IdMap } from "../id-map/id_map";
 import { SynchronousCursor } from "./synchronous-cursor";
 interface OplogObserveDriverOptions {
     cursorDescription: CursorDescription<any>;

@@ -53,13 +52,15 @@ export declare class OplogObserveDriver {
     _handleOplogEntrySteadyOrFetching(op: any): void;
     _runInitialQuery(): Promise<void>;
     _pollQuery(): void;
-    _runQuery(options?:
+    _runQuery(options?: {
+        initial?: boolean;
+    }): Promise<void>;
     _needToPollQuery(): void;
     _doneQuerying(): Promise<void>;
     _cursorForQuery(optionsOverwrite: {
         limit: number;
     }): SynchronousCursor;
-    _publishNewResults(newResults:
+    _publishNewResults(newResults: Map<string, any>, newBuffer: Map<string, any>): void;
     stop(): void;
     _registerPhaseChange(phase: any): void;
     static cursorSupported(cursorDescription: CursorDescription<any>, matcher: MinimongoMatcher): boolean;
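A hedged sketch of how the tightened declarations read at a call site; `driver` is a stand-in, and the invocation pattern is assumed from the compiled .js changes below (which construct native Maps inside _runQuery):

```typescript
// Illustrative only: `driver` stands in for an OplogObserveDriver instance;
// these are internal methods, shown here just to exercise the new signatures.
declare const driver: {
  _runQuery(options?: { initial?: boolean }): Promise<void>;
  _publishNewResults(newResults: Map<string, any>, newBuffer: Map<string, any>): void;
};

async function example() {
  // The options bag is now a typed object literal instead of a truncated/untyped parameter.
  await driver._runQuery({ initial: true });

  // Both result sets are plain ES Maps rather than the removed IdMap type.
  driver._publishNewResults(new Map(), new Map());
}
```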
package/dist/meteor/mongo/oplog-observe-driver.js

@@ -11,7 +11,6 @@ const max_heap_1 = require("../binary-heap/max_heap");
 const minimongo_common_1 = require("./minimongo_common");
 const ejson_1 = require("../ejson/ejson");
 const oplog_tailing_1 = require("./oplog_tailing");
-const id_map_1 = require("../id-map/id_map");
 const synchronous_cursor_1 = require("./synchronous-cursor");
 var PHASE;
 (function (PHASE) {

@@ -77,7 +76,7 @@ class OplogObserveDriver {
         self._comparator = null;
         self._sorter = null;
         self._unpublishedBuffer = null;
-        self._published = new
+        self._published = new Map();
     }
     // Indicates if it is safe to insert a new document at the end of the buffer
     // for this query. i.e. it is known that there are no documents matching the

@@ -187,11 +186,11 @@ class OplogObserveDriver {
         // (exceeding capacity specified by limit). If so, push the maximum
         // element to the buffer, we might want to save it in memory to reduce the
         // amount of Mongo lookups in the future.
-        if (self._limit && self._published.size
+        if (self._limit && self._published.size > self._limit) {
             // XXX in theory the size of published is no more than limit+1
-            if (self._published.size
+            if (self._published.size !== self._limit + 1) {
                 throw new Error("After adding to published, " +
-                    (self._published.size
+                    (self._published.size - self._limit) +
                     " documents are overflowing the set");
             }
             var overflowingDocId = self._published.maxElementId();

@@ -199,7 +198,7 @@ class OplogObserveDriver {
             if ((0, ejson_1.equals)(overflowingDocId, id)) {
                 throw new Error("The document just added is overflowing the published set");
             }
-            self._published.
+            self._published.delete(overflowingDocId);
             self._multiplexer.removed(overflowingDocId);
             self._addBuffered(overflowingDocId, overflowingDoc);
         }

@@ -208,11 +207,11 @@ class OplogObserveDriver {
     _removePublished(id) {
         var self = this;
         //Meteor._noYieldsAllowed(function () {
-        self._published.
+        self._published.delete(id);
         self._multiplexer.removed(id);
-        if (!self._limit || self._published.size
+        if (!self._limit || self._published.size === self._limit)
             return;
-        if (self._published.size
+        if (self._published.size > self._limit)
             throw Error("self._published got too big");
         // OK, we are publishing less than the limit. Maybe we should look in the
         // buffer to find the next element past what we were publishing before.

@@ -264,9 +263,9 @@ class OplogObserveDriver {
         //Meteor._noYieldsAllowed(function () {
         self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc));
         // If something is overflowing the buffer, we just remove it from cache
-        if (self._unpublishedBuffer.size
+        if (self._unpublishedBuffer.size > self._limit) {
             var maxBufferedId = self._unpublishedBuffer.maxElementId();
-            self._unpublishedBuffer.
+            self._unpublishedBuffer.delete(maxBufferedId);
             // Since something matching is removed from cache (both published set and
             // buffer), set flag to false
             self._safeAppendToBuffer = false;

@@ -278,11 +277,11 @@ class OplogObserveDriver {
     _removeBuffered(id) {
         var self = this;
         //Meteor._noYieldsAllowed(function () {
-        self._unpublishedBuffer.
+        self._unpublishedBuffer.delete(id);
         // To keep the contract "buffer is never empty in STEADY phase unless the
         // everything matching fits into published" true, we poll everything as
         // soon as we see the buffer becoming empty.
-        if (!self._unpublishedBuffer.size
+        if (!self._unpublishedBuffer.size && !self._safeAppendToBuffer)
             self._needToPollQuery();
         //});
     }

@@ -299,22 +298,22 @@ class OplogObserveDriver {
             throw Error("tried to add something already existed in buffer " + id);
         var limit = self._limit;
         var comparator = self._comparator;
-        var maxPublished = (limit && self._published.size
+        var maxPublished = (limit && self._published.size > 0)
            ? self._published.get(self._published.maxElementId()) // published is MaxHeap because limit is defined
            : null;
-        var maxBuffered = (limit && self._unpublishedBuffer.size
+        var maxBuffered = (limit && self._unpublishedBuffer.size > 0)
            ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId())
            : null;
         // The query is unlimited or didn't publish enough documents yet or the
         // new document would fit into published set pushing the maximum element
         // out, then we need to publish the doc.
-        var toPublish = !limit || self._published.size
+        var toPublish = !limit || self._published.size < limit ||
            comparator(doc, maxPublished) < 0;
         // Otherwise we might need to buffer it (only in case of limited query).
         // Buffering is allowed if the buffer is not filled up yet and all
         // matching docs are either in the published set or in the buffer.
         var canAppendToBuffer = !toPublish && self._safeAppendToBuffer &&
-            self._unpublishedBuffer.size
+            self._unpublishedBuffer.size < limit;
         // Or if it is small enough to be safely inserted to the middle or the
         // beginning of the buffer.
         var canInsertIntoBuffer = !toPublish && maxBuffered &&

@@ -364,7 +363,7 @@ class OplogObserveDriver {
         else if (cachedBefore && matchesNow) {
             var oldDoc = self._published.get(id);
             var comparator = self._comparator;
-            var minBuffered = self._limit && self._unpublishedBuffer.size
+            var minBuffered = self._limit && self._unpublishedBuffer.size &&
                 self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId());
             var maxBuffered;
             if (publishedBefore) {

@@ -378,7 +377,7 @@ class OplogObserveDriver {
                 // published. Notably, we don't want to schedule repoll and continue
                 // relying on this property.
                 var staysInPublished = !self._limit ||
-                    self._unpublishedBuffer.size
+                    self._unpublishedBuffer.size === 0 ||
                     comparator(newDoc, minBuffered) <= 0;
                 if (staysInPublished) {
                     self._changePublished(id, oldDoc, newDoc);

@@ -405,10 +404,10 @@ class OplogObserveDriver {
                 // we don't trigger the querying immediately. if we end this block
                 // with the buffer empty, we will need to trigger the query poll
                 // manually too.
-                self._unpublishedBuffer.
+                self._unpublishedBuffer.delete(id);
                 // published is MaxHeap because bufferedBefore is only set when limit is defined
                 var maxPublished = self._published.get(self._published.maxElementId());
-                maxBuffered = self._unpublishedBuffer.size
+                maxBuffered = self._unpublishedBuffer.size && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
                 // the buffered doc was updated, it could move to published
                 var toPublish = comparator(newDoc, maxPublished) < 0;
                 // or stays in buffer even after the change

@@ -427,7 +426,7 @@ class OplogObserveDriver {
                     self._safeAppendToBuffer = false;
                     // Normally this check would have been done in _removeBuffered but
                     // we didn't use it, so we need to do it ourself now.
-                    if (!self._unpublishedBuffer.size
+                    if (!self._unpublishedBuffer.size) {
                         self._needToPollQuery();
                     }
                 }

@@ -667,8 +666,8 @@ class OplogObserveDriver {
         // If we've been stopped, we don't have to run anything any more.
         if (self._stopped)
             return;
-        newResults = new
-        newBuffer = new
+        newResults = new Map();
+        newBuffer = new Map();
         // Query 2x documents as the half excluded from the original query will go
         // into unpublished buffer to reduce additional Mongo lookups in cases
         // when documents are removed from the published set and need a

@@ -809,7 +808,7 @@ class OplogObserveDriver {
         // Sanity-check that everything we tried to put into _published ended up
         // there.
         // XXX if this is slow, remove it later
-        if (self._published.size
+        if (self._published.size !== newResults.size) {
             console.error('The Mongo server and the Meteor query disagree on how ' +
                 'many documents match your query. Cursor description: ', self._cursorDescription);
             throw Error("The Mongo server and the Meteor query disagree on how " +

@@ -825,7 +824,7 @@ class OplogObserveDriver {
         newBuffer.forEach(function (doc, id) {
             self._addBuffered(id, doc);
         });
-        self._safeAppendToBuffer = newBuffer.size
+        self._safeAppendToBuffer = newBuffer.size < self._limit;
         //});
     }
     // This stop function is invoked from the onStop of the ObserveMultiplexer, so
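The recurring pattern in this file is a container-API migration: the IdMap import is gone, _published is constructed as a native Map, and, judging from the surviving halves of the truncated removed lines, method-style calls give way to the native Map surface, i.e. a size property and delete(). A small illustrative sketch, with `published` standing in for self._published:

```typescript
// Sketch of the container change assumed across this file: `published` stands in
// for self._published (a native Map here; with a limit the real code uses a
// MaxHeap that now exposes the same size/delete surface).
const published = new Map<string, { x: number }>();
const limit = 2;

published.set("a", { x: 1 });
published.set("b", { x: 2 });
published.set("c", { x: 3 });

// New code reads size as a property and removes entries with delete();
// the pre-0.1.0 code (truncated in this diff view) appears to have used the
// removed IdMap-style method API instead.
if (limit && published.size > limit) {
  const overflowingId = "c"; // the real code takes maxElementId() from the heap
  published.delete(overflowingId);
}
```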
package/dist/meteor/mongo/polling_observe_driver.js

@@ -1,11 +1,17 @@
 "use strict";
+var __asyncValues = (this && this.__asyncValues) || function (o) {
+    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+    var m = o[Symbol.asyncIterator], i;
+    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PollingObserveDriver = void 0;
 const writefence_1 = require("../ddp/writefence");
 const diff_1 = require("../diff-sequence/diff");
 const synchronous_queue_1 = require("./synchronous-queue");
 const observe_driver_utils_1 = require("./observe_driver_utils");
-const id_map_1 = require("../id-map/id_map");
 var POLLING_THROTTLE_MS = +process.env.METEOR_POLLING_THROTTLE_MS || 50;
 var POLLING_INTERVAL_MS = +process.env.METEOR_POLLING_INTERVAL_MS || 10 * 1000;
 class PollingObserveDriver {

@@ -69,6 +75,7 @@ class PollingObserveDriver {
         self._taskQueue.queueTask(async () => await self._pollMongo());
     }
     async _pollMongo() {
+        var _a, e_1, _b, _c;
         var self = this;
         --self._pollsScheduledButNotStarted;
         if (self._stopped)

@@ -79,7 +86,7 @@ class PollingObserveDriver {
         if (!oldResults) {
             first = true;
             // XXX maybe use OrderedDict instead?
-            oldResults = self._ordered ? [] : new
+            oldResults = self._ordered ? [] : new Map();
         }
         // Save the list of pending writes which this round will commit.
         var writesForCycle = self._pendingWrites;

@@ -88,8 +95,27 @@ class PollingObserveDriver {
         try {
             const cursor = self._mongoHandle.db.collection(self._cursorDescription.collectionName).find(self._cursorDescription.selector);
             if (!self._ordered) {
-                newResults = new
-
+                newResults = new Map();
+                try {
+                    for (var _d = true, cursor_1 = __asyncValues(cursor), cursor_1_1; cursor_1_1 = await cursor_1.next(), _a = cursor_1_1.done, !_a;) {
+                        _c = cursor_1_1.value;
+                        _d = false;
+                        try {
+                            const doc = _c;
+                            newResults.set(doc._id, doc);
+                        }
+                        finally {
+                            _d = true;
+                        }
+                    }
+                }
+                catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                finally {
+                    try {
+                        if (!_d && !_a && (_b = cursor_1.return)) await _b.call(cursor_1);
+                    }
+                    finally { if (e_1) throw e_1.error; }
+                }
             }
             else
                 newResults = await cursor.toArray();
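The added __asyncValues helper plus the unrolled loop are TypeScript's downlevel output for a `for await … of` loop over the MongoDB find cursor. The probable source form, sketched here since the .ts source is not part of this diff:

```typescript
// Likely TypeScript source behind the compiled __asyncValues loop above
// (a sketch; the actual source file is not included in this package diff).
import type { FindCursor } from "mongodb";

async function collectUnordered(cursor: FindCursor<any>): Promise<Map<string, any>> {
  const newResults = new Map<string, any>();
  // Stream documents for the unordered case instead of materializing the
  // whole result set; the ordered branch still uses cursor.toArray().
  for await (const doc of cursor) {
    newResults.set(doc._id, doc);
  }
  return newResults;
}
```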
package/dist/meteor/ordered-dict/ordered_dict.d.ts

@@ -10,12 +10,12 @@ export declare class OrderedDict {
     constructor(...args: any[]);
     _k(key: any): string;
     empty(): boolean;
-    size(): number;
+    get size(): number;
     _linkEltIn(elt: any): void;
     _linkEltOut(elt: any): void;
     putBefore(key: any, item: any, before: any): void;
     append(key: any, item: any): void;
-
+    delete(key: any): any;
     get(key: any): any;
     has(key: any): any;
     forEach(iter: (doc: any, key: string, index: number) => any, context?: any): void;
package/dist/meteor/ordered-dict/ordered_dict.js

@@ -38,7 +38,7 @@ class OrderedDict {
     empty() {
         return !this._first;
     }
-    size() {
+    get size() {
         return this._size;
     }
     _linkEltIn(elt) {

@@ -82,7 +82,7 @@ class OrderedDict {
     append(key, item) {
         this.putBefore(key, item, null);
     }
-
+    delete(key) {
         var elt = this._dict[this._k(key)];
         if (typeof elt === "undefined")
             throw new Error("Item " + key + " not present in OrderedDict");
|