mongodb-livedata-server 0.0.4 → 0.0.6
This diff shows the changes between publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- package/{livedata_server.ts → dist/livedata_server.d.ts} +1 -1
- package/dist/livedata_server.js +3 -1
- package/dist/meteor/binary-heap/max_heap.d.ts +31 -0
- package/dist/meteor/binary-heap/min_heap.d.ts +6 -0
- package/dist/meteor/binary-heap/min_max_heap.d.ts +11 -0
- package/dist/meteor/callback-hook/hook.d.ts +11 -0
- package/dist/meteor/ddp/crossbar.d.ts +15 -0
- package/dist/meteor/ddp/heartbeat.d.ts +19 -0
- package/dist/meteor/ddp/livedata_server.d.ts +141 -0
- package/dist/meteor/ddp/method-invocation.d.ts +25 -0
- package/dist/meteor/ddp/random-stream.d.ts +8 -0
- package/dist/meteor/ddp/session-collection-view.d.ts +27 -0
- package/dist/meteor/ddp/session-document-view.d.ts +8 -0
- package/dist/meteor/ddp/session.d.ts +69 -0
- package/dist/meteor/ddp/stream_server.d.ts +21 -0
- package/dist/meteor/ddp/subscription.d.ts +89 -0
- package/dist/meteor/ddp/utils.d.ts +8 -0
- package/dist/meteor/ddp/writefence.d.ts +20 -0
- package/dist/meteor/diff-sequence/diff.d.ts +13 -0
- package/dist/meteor/ejson/ejson.d.ts +82 -0
- package/dist/meteor/ejson/stringify.d.ts +2 -0
- package/dist/meteor/ejson/utils.d.ts +12 -0
- package/dist/meteor/id-map/id_map.d.ts +16 -0
- package/dist/meteor/mongo/caching_change_observer.d.ts +16 -0
- package/dist/meteor/mongo/doc_fetcher.d.ts +7 -0
- package/dist/meteor/mongo/geojson_utils.d.ts +3 -0
- package/dist/meteor/mongo/live_connection.d.ts +27 -0
- package/dist/meteor/mongo/live_cursor.d.ts +25 -0
- package/dist/meteor/mongo/minimongo_common.d.ts +84 -0
- package/dist/meteor/mongo/minimongo_matcher.d.ts +22 -0
- package/dist/meteor/mongo/minimongo_sorter.d.ts +16 -0
- package/dist/meteor/mongo/observe_driver_utils.d.ts +9 -0
- package/dist/meteor/mongo/observe_multiplexer.d.ts +36 -0
- package/dist/meteor/mongo/oplog-observe-driver.d.ts +67 -0
- package/dist/meteor/mongo/oplog_tailing.d.ts +35 -0
- package/dist/meteor/mongo/oplog_v2_converter.d.ts +1 -0
- package/dist/meteor/mongo/polling_observe_driver.d.ts +30 -0
- package/dist/meteor/mongo/synchronous-cursor.d.ts +17 -0
- package/dist/meteor/mongo/synchronous-queue.d.ts +14 -0
- package/dist/meteor/ordered-dict/ordered_dict.d.ts +31 -0
- package/dist/meteor/random/AbstractRandomGenerator.d.ts +42 -0
- package/dist/meteor/random/AleaRandomGenerator.d.ts +13 -0
- package/dist/meteor/random/NodeRandomGenerator.d.ts +16 -0
- package/dist/meteor/random/createAleaGenerator.d.ts +2 -0
- package/dist/meteor/random/createRandom.d.ts +1 -0
- package/dist/meteor/random/main.d.ts +1 -0
- package/package.json +2 -2
- package/meteor/LICENSE +0 -28
- package/meteor/binary-heap/max_heap.ts +0 -225
- package/meteor/binary-heap/min_heap.ts +0 -15
- package/meteor/binary-heap/min_max_heap.ts +0 -53
- package/meteor/callback-hook/hook.ts +0 -85
- package/meteor/ddp/crossbar.ts +0 -148
- package/meteor/ddp/heartbeat.ts +0 -97
- package/meteor/ddp/livedata_server.ts +0 -474
- package/meteor/ddp/method-invocation.ts +0 -86
- package/meteor/ddp/random-stream.ts +0 -102
- package/meteor/ddp/session-collection-view.ts +0 -119
- package/meteor/ddp/session-document-view.ts +0 -92
- package/meteor/ddp/session.ts +0 -708
- package/meteor/ddp/stream_server.ts +0 -204
- package/meteor/ddp/subscription.ts +0 -392
- package/meteor/ddp/utils.ts +0 -119
- package/meteor/ddp/writefence.ts +0 -130
- package/meteor/diff-sequence/diff.ts +0 -295
- package/meteor/ejson/ejson.ts +0 -601
- package/meteor/ejson/stringify.ts +0 -122
- package/meteor/ejson/utils.ts +0 -38
- package/meteor/id-map/id_map.ts +0 -84
- package/meteor/mongo/caching_change_observer.ts +0 -120
- package/meteor/mongo/doc_fetcher.ts +0 -52
- package/meteor/mongo/geojson_utils.ts +0 -42
- package/meteor/mongo/live_connection.ts +0 -302
- package/meteor/mongo/live_cursor.ts +0 -79
- package/meteor/mongo/minimongo_common.ts +0 -2440
- package/meteor/mongo/minimongo_matcher.ts +0 -275
- package/meteor/mongo/minimongo_sorter.ts +0 -331
- package/meteor/mongo/observe_driver_utils.ts +0 -79
- package/meteor/mongo/observe_multiplexer.ts +0 -256
- package/meteor/mongo/oplog-observe-driver.ts +0 -1049
- package/meteor/mongo/oplog_tailing.ts +0 -414
- package/meteor/mongo/oplog_v2_converter.ts +0 -124
- package/meteor/mongo/polling_observe_driver.ts +0 -247
- package/meteor/mongo/synchronous-cursor.ts +0 -293
- package/meteor/mongo/synchronous-queue.ts +0 -119
- package/meteor/ordered-dict/ordered_dict.ts +0 -229
- package/meteor/random/AbstractRandomGenerator.ts +0 -99
- package/meteor/random/AleaRandomGenerator.ts +0 -96
- package/meteor/random/NodeRandomGenerator.ts +0 -37
- package/meteor/random/createAleaGenerator.ts +0 -31
- package/meteor/random/createRandom.ts +0 -19
- package/meteor/random/main.ts +0 -8
- package/tsconfig.json +0 -10

--- package/meteor/mongo/polling_observe_driver.ts
+++ /dev/null
@@ -1,247 +0,0 @@
-import { _InvalidationCrossbar } from "../ddp/crossbar";
-import { _WriteFence } from "../ddp/writefence";
-import { LiveMongoConnection } from "./live_connection";
-import { ObserveMultiplexer } from "./observe_multiplexer";
-import { CursorDescription } from "./live_cursor";
-import { DiffSequence } from "../diff-sequence/diff";
-import { _SynchronousQueue } from "./synchronous-queue";
-import { listenAll } from "./observe_driver_utils";
-import { IdMap } from "../id-map/id_map";
-
-var POLLING_THROTTLE_MS = +process.env.METEOR_POLLING_THROTTLE_MS || 50;
-var POLLING_INTERVAL_MS = +process.env.METEOR_POLLING_INTERVAL_MS || 10 * 1000;
-
-interface PollingObserveDriverOptions<TOrdered extends boolean> {
-  cursorDescription: CursorDescription<any>;
-  mongoHandle: LiveMongoConnection;
-  ordered: TOrdered;
-  multiplexer: ObserveMultiplexer;
-  stopCallbacks?: (() => void)[];
-  matcher?: any;
-  sorter?: any;
-}
-
-type ResultsType<TOrdered> = TOrdered extends true ? any[] : IdMap;
-
-export class PollingObserveDriver<TOrdered extends boolean> {
-  private _cursorDescription: CursorDescription<any>;
-  private _mongoHandle: LiveMongoConnection;
-  private _ordered: boolean;
-  private _multiplexer: ObserveMultiplexer;
-  private _stopCallbacks: (() => void)[];
-  private _stopped: boolean;
-
-  private _results: ResultsType<TOrdered> | null;
-  private _pollsScheduledButNotStarted: number;
-  private _pendingWrites: { committed: () => void }[];
-  private _ensurePollIsScheduled: Function;
-  private _taskQueue: _SynchronousQueue;
-
-  constructor(options: PollingObserveDriverOptions<TOrdered>) {
-    var self = this;
-
-    self._cursorDescription = options.cursorDescription;
-    self._mongoHandle = options.mongoHandle;
-    self._ordered = options.ordered;
-    self._multiplexer = options.multiplexer;
-    self._stopCallbacks = [];
-    self._stopped = false;
-
-    // Previous results snapshot. On each poll cycle, diffing the new results
-    // against this snapshot drives the observer callbacks.
-    self._results = null;
-
-    // The number of _pollMongo calls that have been added to self._taskQueue but
-    // have not started running. Used to make sure we never schedule more than one
-    // _pollMongo (other than possibly the one that is currently running). It's
-    // also used by _suspendPolling to pretend there's a poll scheduled. Usually,
-    // it's either 0 (for "no polls scheduled other than maybe one currently
-    // running") or 1 (for "a poll scheduled that isn't running yet"), but it can
-    // also be 2 if incremented by _suspendPolling.
-    self._pollsScheduledButNotStarted = 0;
-    self._pendingWrites = []; // people to notify when polling completes
-
-    // Make sure to create a separately throttled function for each
-    // PollingObserveDriver object.
-    self._ensurePollIsScheduled = throttle(
-      self._unthrottledEnsurePollIsScheduled,
-      self._cursorDescription.options.pollingThrottleMs || POLLING_THROTTLE_MS /* ms */
-    );
-
-    // XXX figure out if we still need a queue
-    self._taskQueue = new _SynchronousQueue();
-
-    var listenersHandle = listenAll(
-      self._cursorDescription, function (notification) {
-        // When someone does a transaction that might affect us, schedule a poll
-        // of the database. If that transaction happens inside of a write fence,
-        // block the fence until we've polled and notified observers.
-        var fence = _WriteFence._CurrentWriteFence;
-        if (fence)
-          self._pendingWrites.push(fence.beginWrite());
-        // Ensure a poll is scheduled... but if we already know that one is,
-        // don't hit the throttled _ensurePollIsScheduled function (which might
-        // lead to us calling it unnecessarily in <pollingThrottleMs> ms).
-        if (self._pollsScheduledButNotStarted === 0)
-          self._ensurePollIsScheduled();
-      }
-    );
-    self._stopCallbacks.push(function () { listenersHandle.stop(); });
-
-    // Every once in a while, poll even if we don't think we're dirty, for
-    // eventual consistency with database writes from outside the Meteor
-    // universe.
-    var pollingInterval = self._cursorDescription.options.pollingIntervalMs || POLLING_INTERVAL_MS;
-    var intervalHandle = setInterval(() => self._ensurePollIsScheduled(), pollingInterval);
-    self._stopCallbacks.push(function () {
-      clearInterval(intervalHandle);
-    });
-
-    // Make sure we actually poll soon!
-    self._unthrottledEnsurePollIsScheduled();
-
-  };
-
-  // This is always called through the throttle wrapper (except once at startup).
-  _unthrottledEnsurePollIsScheduled() {
-    var self = this;
-    if (self._pollsScheduledButNotStarted > 0)
-      return;
-    ++self._pollsScheduledButNotStarted;
-    self._taskQueue.queueTask(async () => await self._pollMongo());
-  }
-
-  async _pollMongo() {
-    var self = this;
-    --self._pollsScheduledButNotStarted;
-
-    if (self._stopped)
-      return;
-
-    var first = false;
-    var newResults: any;
-    var oldResults: any = self._results;
-    if (!oldResults) {
-      first = true;
-      // XXX maybe use OrderedDict instead?
-      oldResults = self._ordered ? [] : new IdMap();
-    }
-
-    // Save the list of pending writes which this round will commit.
-    var writesForCycle = self._pendingWrites;
-    self._pendingWrites = [];
-
-    // Get the new query results. (This yields.)
-    try {
-      const cursor = self._mongoHandle.db.collection(self._cursorDescription.collectionName).find(self._cursorDescription.selector);
-      if (!self._ordered) {
-        newResults = new IdMap();
-        await cursor.forEach((doc) => (newResults as IdMap).set(doc._id, doc));
-      } else
-        newResults = await cursor.toArray();
-    } catch (e) {
-      if (first && typeof (e.code) === 'number') {
-        // This is an error document sent to us by mongod, not a connection
-        // error generated by the client. And we've never seen this query work
-        // successfully. Probably it's a bad selector or something, so we should
-        // NOT retry. Instead, we should halt the observe (which ends up calling
-        // `stop` on us).
-        self._multiplexer.queryError(
-          new Error(
-            "Exception while polling query " +
-            JSON.stringify(self._cursorDescription) + ": " + e.message));
-        return;
-      }
-
-      // getRawObjects can throw if we're having trouble talking to the
-      // database. That's fine --- we will repoll later anyway. But we should
-      // make sure not to lose track of this cycle's writes.
-      // (It also can throw if there's just something invalid about this query;
-      // unfortunately the ObserveDriver API doesn't provide a good way to
-      // "cancel" the observe from the inside in this case.)
-      Array.prototype.push.apply(self._pendingWrites, writesForCycle);
-      console.warn("Exception while polling query " + JSON.stringify(self._cursorDescription), e);
-      return;
-    }
-
-    // Run diffs.
-    // This will trigger the callbacks via the multiplexer.
-    if (!self._stopped) {
-      DiffSequence.diffQueryChanges(self._ordered, oldResults, newResults, self._multiplexer);
-    }
-
-    // Signals the multiplexer to allow all observeChanges calls that share this
-    // multiplexer to return. (This happens asynchronously, via the
-    // multiplexer's queue.)
-    if (first)
-      self._multiplexer.ready();
-
-    // Replace self._results atomically. (This assignment is what makes `first`
-    // stay false on the next cycle, so we've waited until after we've
-    // committed to ready-ing the multiplexer.)
-    self._results = newResults;
-
-    // Once the ObserveMultiplexer has processed everything we've done in this
-    // round, mark all the writes which existed before this call as
-    // committed. (If new writes have shown up in the meantime, there'll
-    // already be another _pollMongo task scheduled.)
-    self._multiplexer.onFlush(function () {
-      for (const w of writesForCycle) {
-        w.committed();
-      }
-    });
-  }
-
-  stop() {
-    var self = this;
-    self._stopped = true;
-    for (const c of self._stopCallbacks) {
-      c();
-    }
-    // Release any write fences that are waiting on us.
-    for (const w of self._pendingWrites) {
-      w.committed();
-    }
-  }
-}
-
-function throttle(func: Function, wait: number, options?) {
-  var timeout, context, args, result;
-  var previous = 0;
-  if (!options) options = {};
-
-  var later = function () {
-    previous = options.leading === false ? 0 : Date.now();
-    timeout = null;
-    result = func.apply(context, args);
-    if (!timeout) context = args = null;
-  };
-
-  var throttled = function () {
-    var _now = Date.now();
-    if (!previous && options.leading === false) previous = _now;
-    var remaining = wait - (_now - previous);
-    context = this;
-    args = arguments;
-    if (remaining <= 0 || remaining > wait) {
-      if (timeout) {
-        clearTimeout(timeout);
-        timeout = null;
-      }
-      previous = _now;
-      result = func.apply(context, args);
-      if (!timeout) context = args = null;
-    } else if (!timeout && options.trailing !== false) {
-      timeout = setTimeout(later, remaining);
-    }
-    return result;
-  };
-
-  (throttled as any).cancel = function () {
-    clearTimeout(timeout);
-    previous = 0;
-    timeout = context = args = null;
-  };
-
-  return throttled;
-}
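
The heart of the removed driver is the poll cycle in `_pollMongo`: swap out the pending write fences, re-run the query, diff the new results against the previous snapshot through the multiplexer, and only then mark the swapped-out writes as committed. A stripped-down, self-contained sketch of that cycle (hypothetical names, not part of this package):

```ts
// Hypothetical sketch of the poll/diff/commit cycle used by PollingObserveDriver.
type Write = { committed: () => void };

class TinyPollingDriver<T extends { _id: string }> {
  private pendingWrites: Write[] = [];
  private results = new Map<string, T>();

  constructor(
    // Stands in for the Mongo find() performed by the real driver.
    private fetch: () => Promise<T[]>,
    // Stands in for DiffSequence.diffQueryChanges plus the multiplexer callbacks.
    private diff: (oldDocs: Map<string, T>, newDocs: Map<string, T>) => void
  ) {}

  // Called from the invalidation listener: remember the fence write so it can
  // be released after the next completed poll.
  noteWrite(w: Write) {
    this.pendingWrites.push(w);
  }

  async poll() {
    // Only writes recorded before this poll are committed by this cycle.
    const writesForCycle = this.pendingWrites;
    this.pendingWrites = [];

    const docs = await this.fetch();
    const newResults = new Map<string, T>();
    for (const d of docs) newResults.set(d._id, d);

    // Observers are notified of the delta before the fences are released.
    this.diff(this.results, newResults);
    this.results = newResults;

    for (const w of writesForCycle) w.committed();
  }
}
```

If the fetch throws, the real driver pushes `writesForCycle` back onto `_pendingWrites`, so the fences are still released by a later successful poll or by `stop()`.
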
--- package/meteor/mongo/synchronous-cursor.ts
+++ /dev/null
@@ -1,293 +0,0 @@
-import MongoDB from "mongodb";
-import { equals } from "../ejson/ejson";
-import { OPLOG_COLLECTION } from "./oplog_tailing";
-import { CursorDescription } from "./live_cursor";
-
-export function _createSynchronousCursor(db: MongoDB.Db, cursorDescription: CursorDescription<any>, options?) {
-  const { useTransform } = options || {};
-
-  var collection = db.collection(cursorDescription.collectionName);
-  var cursorOptions = cursorDescription.options;
-  var mongoOptions = {
-    sort: cursorOptions.sort,
-    limit: cursorOptions.limit,
-    skip: cursorOptions.skip,
-    projection: cursorOptions.projection,
-    readPreference: cursorOptions.readPreference,
-    numberOfRetries: undefined
-  };
-
-  // Do we want a tailable cursor (which only works on capped collections)?
-  if (cursorOptions.tailable) {
-    mongoOptions.numberOfRetries = -1;
-  }
-
-  var dbCursor = collection.find(cursorDescription.selector, mongoOptions);
-
-  // Do we want a tailable cursor (which only works on capped collections)?
-  if (cursorOptions.tailable) {
-    // We want a tailable cursor...
-    dbCursor.addCursorFlag("tailable", true)
-    // ... and for the server to wait a bit if any getMore has no data (rather
-    // than making us put the relevant sleeps in the client)...
-    dbCursor.addCursorFlag("awaitData", true)
-
-    // And if this is on the oplog collection and the cursor specifies a 'ts',
-    // then set the undocumented oplog replay flag, which does a special scan to
-    // find the first document (instead of creating an index on ts). This is a
-    // very hard-coded Mongo flag which only works on the oplog collection and
-    // only works with the ts field.
-    if (cursorDescription.collectionName === OPLOG_COLLECTION &&
-        cursorDescription.selector.ts) {
-      dbCursor.addCursorFlag("oplogReplay", true)
-    }
-  }
-
-  if (typeof cursorOptions.maxTimeMs !== 'undefined') {
-    dbCursor = dbCursor.maxTimeMS(cursorOptions.maxTimeMs);
-  }
-  if (typeof cursorOptions.hint !== 'undefined') {
-    dbCursor = dbCursor.hint(cursorOptions.hint);
-  }
-
-  return new SynchronousCursor(dbCursor, cursorDescription, { useTransform });
-};
-
-export class SynchronousCursor {
-
-  private _transform: any;
-  private _visitedIds: Set<string>;
-
-  constructor(
-    private _dbCursor: MongoDB.FindCursor,
-    private _cursorDescription: CursorDescription<any>,
-    options: { useTransform?: boolean }
-  ) {
-    var self = this;
-
-    if (options.useTransform && _cursorDescription.options.transform) {
-      self._transform = wrapTransform(_cursorDescription.options.transform);
-    } else {
-      self._transform = null;
-    }
-
-    self._visitedIds = new Set();
-  }
-
-  // Returns a Promise for the next object from the cursor, skipping those whose
-  // IDs we've already seen and replacing Mongo atoms with Meteor atoms.
-  async _nextObjectPromise() {
-    var self = this;
-
-    while (true) {
-      var doc = await this._dbCursor.next();
-
-      if (!doc) return null;
-
-      if (!self._cursorDescription.options.tailable && doc.hasOwnProperty('_id')) {
-        // Did Mongo give us duplicate documents in the same cursor? If so,
-        // ignore this one. (Do this before the transform, since transform might
-        // return some unrelated value.) We don't do this for tailable cursors,
-        // because we want to maintain O(1) memory usage. And if there isn't _id
-        // for some reason (maybe it's the oplog), then we don't do this either.
-        // (Be careful to do this for falsey but existing _id, though.)
-        if (self._visitedIds.has(doc._id)) continue;
-        self._visitedIds.add(doc._id);
-      }
-
-      if (self._transform)
-        doc = self._transform(doc);
-
-      return doc;
-    }
-  }
-
-  // Returns a promise which is resolved with the next object (like with
-  // _nextObjectPromise) or rejected if the cursor doesn't return within
-  // timeoutMS ms.
-  async _nextObjectPromiseWithTimeout(timeoutMS: number) {
-    if (!timeoutMS) {
-      return this._nextObjectPromise();
-    }
-    const nextObjectPromise = this._nextObjectPromise();
-    const timeoutErr = new Error('Client-side timeout waiting for next object');
-    const timeoutPromise = new Promise<void>((_resolve, reject) => {
-      setTimeout(() => {
-        reject(timeoutErr);
-      }, timeoutMS);
-    });
-    return Promise.race([nextObjectPromise, timeoutPromise])
-      .catch((err) => {
-        if (err === timeoutErr) {
-          this._dbCursor.close();
-        }
-        throw err;
-      });
-  }
-
-  close() {
-    this._dbCursor.close();
-  }
-
-  async forEach(callback: (doc: any, index: number, cursor: SynchronousCursor) => void, thisArg?) {
-    var self = this;
-
-    // Get back to the beginning.
-    self._rewind();
-
-    // We implement the loop ourselves instead of using self._dbCursor.each,
-    // because "each" will call its callback outside of a fiber which makes it
-    // much more complex to make this function synchronous.
-    var index = 0;
-    while (true) {
-      var doc = await self._nextObjectPromise();
-      if (!doc) return;
-      callback.call(thisArg, doc, index++, self);
-    }
-  }
-
-  _rewind() {
-    var self = this;
-
-    // known to be synchronous
-    self._dbCursor.rewind();
-
-    self._visitedIds = new Set();
-  }
-}
-
-// Wrap a transform function to return objects that have the _id field
-// of the untransformed document. This ensures that subsystems such as
-// the observe-sequence package that call `observe` can keep track of
-// the documents' identities.
-//
-// - Require that it returns objects
-// - If the return value has an _id field, verify that it matches the
-//   original _id field
-// - If the return value doesn't have an _id field, add it back.
-function wrapTransform(transform: Function & { __wrappedTransform__?: boolean }) {
-  if (!transform)
-    return null;
-
-  // No need to doubly-wrap transforms.
-  if (transform.__wrappedTransform__)
-    return transform;
-
-  const wrapped = doc => {
-    if (!doc.hasOwnProperty('_id')) {
-      // XXX do we ever have a transform on the oplog's collection? because that
-      // collection has no _id.
-      throw new Error('can only transform documents with _id');
-    }
-
-    const id = doc._id;
-
-    const transformed = transform(doc);
-
-    if (transformed.hasOwnProperty('_id')) {
-      if (!equals(transformed._id, id)) {
-        throw new Error('transformed document can\'t have different _id');
-      }
-    } else {
-      transformed._id = id;
-    }
-
-    return transformed;
-  };
-
-  wrapped.__wrappedTransform__ = true;
-
-  return wrapped;
-};
-
-
-/*
-  forEach(callback, thisArg) {
-    var self = this;
-
-    // Get back to the beginning.
-    self._rewind();
-
-    // We implement the loop ourself instead of using self._dbCursor.each,
-    // because "each" will call its callback outside of a fiber which makes it
-    // much more complex to make this function synchronous.
-    var index = 0;
-    while (true) {
-      var doc = self._nextObject();
-      if (!doc) return;
-      callback.call(thisArg, doc, index++, self._selfForIteration);
-    }
-  }
-
-  // XXX Allow overlapping callback executions if callback yields.
-  map(callback, thisArg) {
-    var self = this;
-    var res = [];
-    self.forEach(function (doc, index) {
-      res.push(callback.call(thisArg, doc, index, self._selfForIteration));
-    });
-    return res;
-  },
-
-  _rewind() {
-    var self = this;
-
-    // known to be synchronous
-    self._dbCursor.rewind();
-
-    self._visitedIds = new LocalCollection._IdMap;
-  }
-
-  // Mostly usable for tailable cursors.
-  close() {
-    var self = this;
-
-    self._dbCursor.close();
-  }
-
-  fetch() {
-    var self = this;
-    return self.map(_.identity);
-  }
-
-  // This method is NOT wrapped in Cursor.
-  getRawObjects(ordered) {
-    var self = this;
-    if (ordered) {
-      return self.fetch();
-    } else {
-      var results = new LocalCollection._IdMap;
-      self.forEach(function (doc) {
-        results.set(doc._id, doc);
-      });
-      return results;
-    }
-  }
-
-  [Symbol.iterator]() {
-    var self = this;
-
-    // Get back to the beginning.
-    self._rewind();
-
-    return {
-      next() {
-        const doc = self._nextObject();
-        return doc ? {
-          value: doc
-        } : {
-          done: true
-        };
-      }
-    };
-  };
-
-  [Symbol.asyncIterator]() {
-    const syncResult = this[Symbol.iterator]();
-    return {
-      async next() {
-        return Promise.resolve(syncResult.next());
-      }
-    };
-  }
-*/
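
`_nextObjectPromiseWithTimeout` above is an instance of a generic Promise.race timeout pattern. A self-contained sketch (the helper name and signature are illustrative, not part of this package):

```ts
// Illustrative helper: reject if `work` has not settled within `ms`, and let
// the caller run cleanup (e.g. closing the cursor) when the timeout wins.
function withTimeout<T>(work: Promise<T>, ms: number, onTimeout?: () => void): Promise<T> {
  const timeoutErr = new Error(`Timed out after ${ms} ms`);
  let timer: ReturnType<typeof setTimeout> | undefined;
  const timeout = new Promise<never>((_resolve, reject) => {
    timer = setTimeout(() => reject(timeoutErr), ms);
  });
  return Promise.race([work, timeout])
    .catch((err) => {
      // Only the timeout triggers cleanup; other errors pass straight through.
      if (err === timeoutErr && onTimeout) onTimeout();
      throw err;
    })
    .finally(() => clearTimeout(timer));
}

// Usage sketch, mirroring the cursor method: close the cursor if next() loses the race.
// const doc = await withTimeout(cursor.next(), 5000, () => cursor.close());
```

Unlike the removed method, this sketch also clears the timer once the race settles, so a promptly resolved `next()` does not leave a pending timeout behind.
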
--- package/meteor/mongo/synchronous-queue.ts
+++ /dev/null
@@ -1,119 +0,0 @@
-import DoubleEndedQueue from "double-ended-queue";
-
-type AsyncFunction = (...args: any[]) => Promise<any>;
-type TaskHandle = { task: AsyncFunction, name: string, runTaskResolve?: Function, runTaskReject?: Function };
-
-export class _SynchronousQueue {
-  // List of tasks to run (not including a currently-running task if any). Each
-  // is an object with fields 'task' (the async task function to run), 'name',
-  // and 'runTaskResolve'/'runTaskReject' (the promise callbacks that settle the
-  // blocking runTask call that queued it; absent if queued via queueTask).
-  private _taskHandles = new DoubleEndedQueue<TaskHandle>();
-
-  // This is true if self._run() is either currently executing or scheduled to
-  // do so soon.
-  private _runningOrRunScheduled = false;
-
-  // This is true if we're currently draining. While we're draining, a further
-  // drain is a noop, to prevent infinite loops. "drain" is a heuristic type
-  // operation, that has a meaning like unto "what a naive person would expect
-  // when modifying a table from an observe"
-  private _draining = false;
-
-  constructor() { }
-
-  async runTask(task: AsyncFunction) {
-    return new Promise((resolve, reject) => {
-      var handle: TaskHandle = {
-        task: task,
-        name: task.name,
-        runTaskResolve: resolve,
-        runTaskReject: reject
-      };
-      this._taskHandles.push(handle);
-      this._scheduleRun();
-      // The returned promise settles after the task has run (and rejects if
-      // the task throws).
-
-    })
-  }
-
-  queueTask(task: AsyncFunction) {
-    var self = this;
-    self._taskHandles.push({
-      task: task,
-      name: task.name
-    });
-    self._scheduleRun();
-    // No need to block.
-  }
-
-  async flush() {
-    var self = this;
-    await self.runTask(async () => { });
-  }
-
-  async drain() {
-    var self = this;
-    if (self._draining)
-      return;
-    self._draining = true;
-    while (!self._taskHandles.isEmpty()) {
-      await self.flush();
-    }
-    self._draining = false;
-  };
-
-  _scheduleRun() {
-    // Already running or scheduled? Do nothing.
-    if (this._runningOrRunScheduled)
-      return;
-
-    this._runningOrRunScheduled = true;
-    setImmediate(async () => {
-      await this._run();
-    });
-  };
-
-  async _run() {
-    var self = this;
-
-    if (!self._runningOrRunScheduled)
-      throw new Error("expected to be _runningOrRunScheduled");
-
-    if (self._taskHandles.isEmpty()) {
-      // Done running tasks! Don't immediately schedule another run, but
-      // allow future tasks to do so.
-      self._runningOrRunScheduled = false;
-      return;
-    }
-    var taskHandle = self._taskHandles.shift();
-
-    // Run the task.
-    var exception = undefined;
-    try {
-      await taskHandle.task();
-    } catch (err) {
-      if (taskHandle.runTaskReject) {
-        // We'll throw this exception through runTask.
-        exception = err;
-      } else {
-        console.error("Exception in queued task", err);
-      }
-    }
-
-    // Soon, run the next task, if there is any.
-    self._runningOrRunScheduled = false;
-    self._scheduleRun();
-
-    // If this was queued with runTask, let the runTask call return (throwing if
-    // the task threw).
-    if (taskHandle.runTaskReject) {
-      if (exception)
-        taskHandle.runTaskReject(exception);
-      else
-        taskHandle.runTaskResolve();
-    }
-  }
-
-}
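
The removed `_SynchronousQueue` serializes async tasks: at most one task runs at a time, `queueTask` is fire-and-forget, and `runTask` settles with the task's own outcome. The same contract can be sketched with plain promise chaining (a minimal illustration, not the package's implementation):

```ts
// Minimal serial task queue with the same runTask/queueTask/flush contract.
class TinySerialQueue {
  private tail: Promise<void> = Promise.resolve();

  // Fire-and-forget: failures are logged, like queueTask above.
  queueTask(task: () => Promise<void>): void {
    this.tail = this.tail.then(task).catch((err) => {
      console.error("Exception in queued task", err);
    });
  }

  // Awaitable: resolves or rejects with the task's outcome, like runTask above.
  runTask<T>(task: () => Promise<T>): Promise<T> {
    const result = this.tail.then(task);
    // Keep the chain alive even if this particular task rejects.
    this.tail = result.then(() => undefined, () => undefined);
    return result;
  }

  // Resolves once everything queued so far has finished, like flush().
  flush(): Promise<void> {
    return this.runTask(async () => {});
  }
}

// Usage sketch:
// const q = new TinySerialQueue();
// q.queueTask(async () => { /* runs first */ });
// await q.runTask(async () => { /* runs only after the first task finished */ });
```

One behavioral difference worth noting: the original keeps an explicit deque and defers task start with setImmediate, which is what lets drain() keep flushing until isEmpty() reports true; the promise-chain sketch has no such introspection.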