mongodb-livedata-server 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +63 -0
- package/dist/livedata_server.js +9 -0
- package/dist/meteor/binary-heap/max_heap.js +186 -0
- package/dist/meteor/binary-heap/min_heap.js +17 -0
- package/dist/meteor/binary-heap/min_max_heap.js +48 -0
- package/dist/meteor/callback-hook/hook.js +78 -0
- package/dist/meteor/ddp/crossbar.js +136 -0
- package/dist/meteor/ddp/heartbeat.js +77 -0
- package/dist/meteor/ddp/livedata_server.js +403 -0
- package/dist/meteor/ddp/method-invocation.js +72 -0
- package/dist/meteor/ddp/random-stream.js +100 -0
- package/dist/meteor/ddp/session-collection-view.js +106 -0
- package/dist/meteor/ddp/session-document-view.js +82 -0
- package/dist/meteor/ddp/session.js +570 -0
- package/dist/meteor/ddp/stream_server.js +181 -0
- package/dist/meteor/ddp/subscription.js +347 -0
- package/dist/meteor/ddp/utils.js +104 -0
- package/dist/meteor/ddp/writefence.js +111 -0
- package/dist/meteor/diff-sequence/diff.js +257 -0
- package/dist/meteor/ejson/ejson.js +569 -0
- package/dist/meteor/ejson/stringify.js +119 -0
- package/dist/meteor/ejson/utils.js +42 -0
- package/dist/meteor/id-map/id_map.js +92 -0
- package/dist/meteor/mongo/caching_change_observer.js +94 -0
- package/dist/meteor/mongo/doc_fetcher.js +53 -0
- package/dist/meteor/mongo/geojson_utils.js +41 -0
- package/dist/meteor/mongo/live_connection.js +264 -0
- package/dist/meteor/mongo/live_cursor.js +57 -0
- package/dist/meteor/mongo/minimongo_common.js +2002 -0
- package/dist/meteor/mongo/minimongo_matcher.js +217 -0
- package/dist/meteor/mongo/minimongo_sorter.js +268 -0
- package/dist/meteor/mongo/observe_driver_utils.js +73 -0
- package/dist/meteor/mongo/observe_multiplexer.js +228 -0
- package/dist/meteor/mongo/oplog-observe-driver.js +919 -0
- package/dist/meteor/mongo/oplog_tailing.js +352 -0
- package/dist/meteor/mongo/oplog_v2_converter.js +126 -0
- package/dist/meteor/mongo/polling_observe_driver.js +195 -0
- package/dist/meteor/mongo/synchronous-cursor.js +261 -0
- package/dist/meteor/mongo/synchronous-queue.js +110 -0
- package/dist/meteor/ordered-dict/ordered_dict.js +198 -0
- package/dist/meteor/random/AbstractRandomGenerator.js +92 -0
- package/dist/meteor/random/AleaRandomGenerator.js +90 -0
- package/dist/meteor/random/NodeRandomGenerator.js +42 -0
- package/dist/meteor/random/createAleaGenerator.js +32 -0
- package/dist/meteor/random/createRandom.js +22 -0
- package/dist/meteor/random/main.js +12 -0
- package/livedata_server.ts +3 -0
- package/meteor/LICENSE +28 -0
- package/meteor/binary-heap/max_heap.ts +225 -0
- package/meteor/binary-heap/min_heap.ts +15 -0
- package/meteor/binary-heap/min_max_heap.ts +53 -0
- package/meteor/callback-hook/hook.ts +85 -0
- package/meteor/ddp/crossbar.ts +148 -0
- package/meteor/ddp/heartbeat.ts +97 -0
- package/meteor/ddp/livedata_server.ts +473 -0
- package/meteor/ddp/method-invocation.ts +86 -0
- package/meteor/ddp/random-stream.ts +102 -0
- package/meteor/ddp/session-collection-view.ts +119 -0
- package/meteor/ddp/session-document-view.ts +92 -0
- package/meteor/ddp/session.ts +708 -0
- package/meteor/ddp/stream_server.ts +204 -0
- package/meteor/ddp/subscription.ts +392 -0
- package/meteor/ddp/utils.ts +119 -0
- package/meteor/ddp/writefence.ts +130 -0
- package/meteor/diff-sequence/diff.ts +295 -0
- package/meteor/ejson/ejson.ts +601 -0
- package/meteor/ejson/stringify.ts +122 -0
- package/meteor/ejson/utils.ts +38 -0
- package/meteor/id-map/id_map.ts +84 -0
- package/meteor/mongo/caching_change_observer.ts +120 -0
- package/meteor/mongo/doc_fetcher.ts +52 -0
- package/meteor/mongo/geojson_utils.ts +42 -0
- package/meteor/mongo/live_connection.ts +302 -0
- package/meteor/mongo/live_cursor.ts +79 -0
- package/meteor/mongo/minimongo_common.ts +2440 -0
- package/meteor/mongo/minimongo_matcher.ts +275 -0
- package/meteor/mongo/minimongo_sorter.ts +331 -0
- package/meteor/mongo/observe_driver_utils.ts +79 -0
- package/meteor/mongo/observe_multiplexer.ts +256 -0
- package/meteor/mongo/oplog-observe-driver.ts +1049 -0
- package/meteor/mongo/oplog_tailing.ts +414 -0
- package/meteor/mongo/oplog_v2_converter.ts +124 -0
- package/meteor/mongo/polling_observe_driver.ts +247 -0
- package/meteor/mongo/synchronous-cursor.ts +293 -0
- package/meteor/mongo/synchronous-queue.ts +119 -0
- package/meteor/ordered-dict/ordered_dict.ts +229 -0
- package/meteor/random/AbstractRandomGenerator.ts +99 -0
- package/meteor/random/AleaRandomGenerator.ts +96 -0
- package/meteor/random/NodeRandomGenerator.ts +37 -0
- package/meteor/random/createAleaGenerator.ts +31 -0
- package/meteor/random/createRandom.ts +19 -0
- package/meteor/random/main.ts +8 -0
- package/package.json +30 -0
- package/tsconfig.json +10 -0
|
@@ -0,0 +1,352 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports._escapeRegExp = exports._sleepForMs = exports.OplogHandle = exports.idForOp = exports.OPLOG_COLLECTION = void 0;
|
|
7
|
+
const mongodb_1 = require("mongodb");
|
|
8
|
+
const hook_1 = require("../callback-hook/hook");
|
|
9
|
+
const crossbar_1 = require("../ddp/crossbar");
|
|
10
|
+
const double_ended_queue_1 = __importDefault(require("double-ended-queue"));
|
|
11
|
+
const ejson_1 = require("../ejson/ejson");
|
|
12
|
+
const live_connection_1 = require("./live_connection");
|
|
13
|
+
const live_cursor_1 = require("./live_cursor");
|
|
14
|
+
exports.OPLOG_COLLECTION = 'oplog.rs';
|
|
15
|
+
// How many queued oplog entries we tolerate before dropping the queue and
// telling observers to repoll. May be overridden at runtime by
// OplogHandle._defineTooFarBehind / _resetTooFarBehind (tests only).
// FIX: coerce the env var to a number (it arrives as a string); this matches
// the existing handling of METEOR_OPLOG_TAIL_TIMEOUT below.
let TOO_FAR_BEHIND = +process.env.METEOR_OPLOG_TOO_FAR_BEHIND || 2000;
// Restart the low-level tail query if no document arrives within this many ms
// (workaround for driver callbacks going silent on failover; see _startTailing).
const TAIL_TIMEOUT = +process.env.METEOR_OPLOG_TAIL_TIMEOUT || 30000;
/**
 * Render a Mongo Timestamp (a Long: high bits = seconds, low bits = counter)
 * in a human-readable form for debugging.
 * @param {import('mongodb').Timestamp} ts
 * @returns {string} e.g. "Timestamp(1696000000, 3)"
 */
var showTS = function (ts) {
    return "Timestamp(" + ts.getHighBits() + ", " + ts.getLowBits() + ")";
};
|
|
20
|
+
/**
 * Extract the _id of the document affected by an oplog entry.
 *
 * Inserts ('i') and deletes ('d') carry the id in `op.o._id`; updates ('u')
 * carry it in the query portion `op.o2._id`. Command entries ('c') have no
 * single affected document, so they (and any unknown op type) throw.
 *
 * @param {Object} op - A raw oplog entry.
 * @returns {*} The affected document's _id.
 * @throws {Error} For 'c' entries and unrecognized op types.
 */
function idForOp(op) {
    switch (op.op) {
        case 'd':
        case 'i':
            return op.o._id;
        case 'u':
            return op.o2._id;
        case 'c':
            throw Error("Operator 'c' doesn't supply an object with id: " +
                (0, ejson_1.stringify)(op));
        default:
            throw Error("Unknown op: " + (0, ejson_1.stringify)(op));
    }
}
|
|
33
|
+
exports.idForOp = idForOp;
|
|
34
|
+
;
|
|
35
|
+
class OplogHandle {
    /**
     * Tails the MongoDB replica-set oplog and fans each relevant entry out to
     * listeners registered through an internal _Crossbar.
     *
     * @param {string} oplogUrl - Mongo URL; must point at the 'local' database
     *   of a replica set (validated in _startTailing).
     * @param {string} dbName - Application database whose namespaces
     *   ("<dbName>.<collection>") this handle watches.
     */
    constructor(oplogUrl, dbName) {
        var self = this;
        // Plain {promise, resolve} pair; resolved once _startTailing has
        // connected and begun tailing. Public entry points await it.
        self._readyFuture = { promise: undefined, resolve: undefined };
        self._readyFuture.promise = new Promise(r => self._readyFuture.resolve = r);
        self._oplogUrl = oplogUrl;
        self._dbName = dbName;
        self._oplogLastEntryConnection = null;
        self._oplogTailConnection = null;
        self._stopped = false;
        self._tailHandle = null;
        self._crossbar = new crossbar_1._Crossbar({
            factPackage: "mongo-livedata", factName: "oplog-watchers"
        });
        // Only look at ops in our db (plus 'admin.$cmd', which carries
        // transaction applyOps entries) of the types we can handle.
        // FIX: the '$' in 'admin.$cmd' must be escaped — an unescaped '$' is
        // an end-of-input anchor, so the original pattern could never match
        // the 'admin.$cmd' namespace and transaction entries were filtered out.
        self._baseOplogSelector = {
            ns: new RegExp("^(?:" + _escapeRegExp(self._dbName) + "\\.|admin\\.\\$cmd)"),
            $or: [
                { op: { $in: ['i', 'u', 'd'] } },
                // drop collection
                { op: 'c', 'o.drop': { $exists: true } },
                { op: 'c', 'o.dropDatabase': 1 },
                { op: 'c', 'o.applyOps': { $exists: true } },
            ]
        };
        // Data structures to support waitUntilCaughtUp(). Each oplog entry has
        // a MongoTimestamp object on it (which is not the same as a Date ---
        // it's a combination of time and an incrementing counter; see
        // http://docs.mongodb.org/manual/reference/bson-types/#timestamps).
        //
        // _catchingUpFutures is an array of {ts: MongoTimestamp, future} pairs
        // (future being a plain {promise, resolve} object), sorted by
        // ascending timestamp. _lastProcessedTS is the MongoTimestamp of the
        // last oplog entry we've processed.
        //
        // Each time we call waitUntilCaughtUp, we take a peek at the final
        // oplog entry in the db. If we've already processed it (ie, it is not
        // greater than _lastProcessedTS), waitUntilCaughtUp immediately
        // returns. Otherwise, waitUntilCaughtUp makes a new promise pair and
        // inserts it, along with the final timestamp entry that it read, into
        // _catchingUpFutures. waitUntilCaughtUp then awaits that promise,
        // which is resolved once _lastProcessedTS is incremented to be past
        // its timestamp by the worker.
        //
        // XXX use a priority queue or something else that's faster than an array
        self._catchingUpFutures = [];
        self._lastProcessedTS = null;
        self._onSkippedEntriesHook = new hook_1.Hook({
            debugPrintExceptions: "onSkippedEntries callback"
        });
        self._entryQueue = new double_ended_queue_1.default();
        self._workerActive = false;
        // NOTE(review): _startTailing is async and deliberately not awaited;
        // a rejection here surfaces as an unhandled promise rejection.
        self._startTailing();
    }
    /**
     * Stop tailing. Idempotent; safe to call before tailing has started.
     */
    stop() {
        var self = this;
        if (self._stopped)
            return;
        self._stopped = true;
        if (self._tailHandle)
            self._tailHandle.stop();
        // XXX should close connections too
    }
    /**
     * Register `callback` to fire for every oplog entry matching `trigger`.
     * Waits until tailing is ready before registering.
     *
     * @param {Object} trigger - Crossbar trigger (collection/id/flags).
     * @param {Function} callback - Invoked with each matching notification;
     *   exceptions are caught and logged so one bad listener cannot break the
     *   crossbar fan-out.
     * @returns {Promise<{stop: Function}>} Handle to deregister the listener.
     * @throws {Error} If the handle has been stopped.
     */
    async onOplogEntry(trigger, callback) {
        var self = this;
        if (self._stopped)
            throw new Error("Called onOplogEntry on stopped handle!");
        // Calling onOplogEntry requires us to wait for the tailing to be ready.
        await self._readyFuture.promise;
        var originalCallback = callback;
        callback = function (notification) {
            try {
                originalCallback(notification);
            }
            catch (err) {
                console.error("Error in oplog callback", err);
            }
        };
        var listenHandle = self._crossbar.listen(trigger, callback);
        return {
            stop: function () {
                listenHandle.stop();
            }
        };
    }
    // Register a callback to be invoked any time we skip oplog entries (eg,
    // because we are too far behind).
    onSkippedEntries(callback) {
        var self = this;
        if (self._stopped)
            throw new Error("Called onSkippedEntries on stopped handle!");
        return self._onSkippedEntriesHook.register(callback);
    }
    // Resolves once the oplog has been processed up to a point that is
    // roughly "now": specifically, once we've processed all ops that are
    // currently visible.
    // XXX become convinced that this is actually safe even if oplogConnection
    // is some kind of pool
    async waitUntilCaughtUp() {
        var self = this;
        if (self._stopped)
            throw new Error("Called waitUntilCaughtUp on stopped handle!");
        // Calling waitUntilCaughtUp requires us to wait for the oplog
        // connection to be ready.
        await self._readyFuture.promise;
        var lastEntry;
        while (!self._stopped) {
            // We need to make the selector at least as restrictive as the actual
            // tailing selector (ie, we need to specify the DB name) or else we might
            // find a TS that won't show up in the actual tail stream.
            try {
                lastEntry = await self._oplogLastEntryConnection.db.collection(exports.OPLOG_COLLECTION).findOne(self._baseOplogSelector, { projection: { ts: 1 }, sort: { $natural: -1 } });
                break;
            }
            catch (e) {
                // During failover (eg) if we get an exception we should log and retry
                // instead of crashing.
                console.warn("Got exception while reading last entry", e);
                await _sleepForMs(100);
            }
        }
        if (self._stopped)
            return;
        if (!lastEntry) {
            // Really, nothing in the oplog? Well, we've processed everything.
            return;
        }
        var ts = lastEntry.ts;
        if (!ts)
            throw Error("oplog entry without ts: " + (0, ejson_1.stringify)(lastEntry));
        if (self._lastProcessedTS && ts.lessThanOrEqual(self._lastProcessedTS)) {
            // We've already caught up to here.
            return;
        }
        // Insert the promise pair into our list. Almost always, this will be
        // at the end, but it's conceivable that if we fail over from one
        // primary to another, the oplog entries we see will go backwards.
        var insertAfter = self._catchingUpFutures.length;
        // NOTE(review): the `insertAfter - 1 > 0` bound (inherited from
        // upstream) never allows insertion at index 0, so an entry smaller
        // than every queued one lands at index 1 — confirm whether this
        // off-by-one is intentional before changing it.
        while (insertAfter - 1 > 0 && self._catchingUpFutures[insertAfter - 1].ts.greaterThan(ts)) {
            insertAfter--;
        }
        var f = { promise: null, resolve: null };
        f.promise = new Promise((r) => f.resolve = r);
        self._catchingUpFutures.splice(insertAfter, 0, { ts: ts, future: f });
        await f.promise;
    }
    /**
     * Open the two oplog connections, validate the replica-set setup, and
     * begin tailing from the current end of the oplog. Resolves _readyFuture
     * when tailing is live.
     */
    async _startTailing() {
        var self = this;
        // First, make sure that we're talking to the local database.
        if (self._oplogUrl.indexOf("/local?") === -1 && !self._oplogUrl.endsWith("/local")) {
            throw Error("$MONGO_OPLOG_URL must be set to the 'local' database of " +
                "a Mongo replica set");
        }
        // We make two separate connections to Mongo. The Node Mongo driver
        // implements a naive round-robin connection pool: each "connection" is a
        // pool of several (5 by default) TCP connections, and each request is
        // rotated through the pools. Tailable cursor queries block on the server
        // until there is some data to return (or until a few seconds have
        // passed). So if the connection pool used for tailing cursors is the same
        // pool used for other queries, the other queries will be delayed by seconds
        // 1/5 of the time.
        //
        // The tail connection will only ever be running a single tail command, so
        // it only needs to make one underlying TCP connection.
        self._oplogTailConnection = new live_connection_1.LiveMongoConnection(self._oplogUrl, { maxPoolSize: 1 });
        // XXX better docs, but: it's to get monotonic results
        // XXX is it safe to say "if there's an in flight query, just use its
        // results"? I don't think so but should consider that
        self._oplogLastEntryConnection = new live_connection_1.LiveMongoConnection(self._oplogUrl, { maxPoolSize: 1 });
        // Now, make sure that there actually is a repl set here. If not, oplog
        // tailing won't ever find anything!
        // More on the isMasterDoc
        // https://docs.mongodb.com/manual/reference/command/isMaster/
        const isMasterDoc = await self._oplogLastEntryConnection.db.admin().command({ ismaster: 1 });
        if (!(isMasterDoc && isMasterDoc.setName)) {
            throw Error("$MONGO_OPLOG_URL must be set to the 'local' database of " +
                "a Mongo replica set");
        }
        // Find the last oplog entry.
        var lastOplogEntry = await self._oplogLastEntryConnection.db.collection(exports.OPLOG_COLLECTION).findOne({}, { sort: { $natural: -1 }, projection: { ts: 1 } });
        var oplogSelector = Object.assign({}, self._baseOplogSelector);
        if (lastOplogEntry) {
            // Start after the last entry that currently exists.
            oplogSelector.ts = { $gt: lastOplogEntry.ts };
            // If there are any calls to callWhenProcessedLatest before any other
            // oplog entries show up, allow callWhenProcessedLatest to call its
            // callback immediately.
            self._lastProcessedTS = lastOplogEntry.ts;
        }
        var cursorDescription = new live_cursor_1.CursorDescription(exports.OPLOG_COLLECTION, oplogSelector, { tailable: true });
        // Start tailing the oplog.
        //
        // We restart the low-level oplog query every 30 seconds if we didn't get a
        // doc. This is a workaround for #8598: the Node Mongo driver has at least
        // one bug that can lead to query callbacks never getting called (even with
        // an error) when leadership failover occurs.
        self._tailHandle = self._oplogTailConnection.tail(cursorDescription, function (doc) {
            self._entryQueue.push(doc);
            self._maybeStartWorker();
        }, TAIL_TIMEOUT);
        self._readyFuture.resolve();
    }
    /**
     * Schedule the queue-draining worker if it is not already running. The
     * worker runs on setImmediate, drains _entryQueue, and fires crossbar
     * triggers for each entry.
     */
    _maybeStartWorker() {
        var self = this;
        if (self._workerActive)
            return;
        self._workerActive = true;
        setImmediate(function () {
            // May be called recursively in case of transactions.
            function handleDoc(doc) {
                if (doc.ns === "admin.$cmd") {
                    if (doc.o.applyOps) {
                        // This was a successful transaction, so we need to apply the
                        // operations that were involved.
                        let nextTimestamp = doc.ts;
                        doc.o.applyOps.forEach(op => {
                            // See https://github.com/meteor/meteor/issues/10420.
                            if (!op.ts) {
                                op.ts = nextTimestamp;
                                nextTimestamp = nextTimestamp.add(mongodb_1.Long.ONE);
                            }
                            handleDoc(op);
                        });
                        return;
                    }
                    throw new Error("Unknown command " + (0, ejson_1.stringify)(doc));
                }
                const trigger = {
                    dropCollection: false,
                    dropDatabase: false,
                    op: doc,
                    collection: undefined,
                    id: undefined
                };
                if (typeof doc.ns === "string" &&
                    doc.ns.startsWith(self._dbName + ".")) {
                    trigger.collection = doc.ns.slice(self._dbName.length + 1);
                }
                // Is it a special command and the collection name is hidden
                // somewhere in operator?
                if (trigger.collection === "$cmd") {
                    if (doc.o.dropDatabase) {
                        delete trigger.collection;
                        trigger.dropDatabase = true;
                    }
                    else if (doc.o.hasOwnProperty("drop")) {
                        trigger.collection = doc.o.drop;
                        trigger.dropCollection = true;
                        trigger.id = null;
                    }
                    else {
                        throw Error("Unknown command " + (0, ejson_1.stringify)(doc));
                    }
                }
                else {
                    // All other ops have an id.
                    trigger.id = idForOp(doc);
                }
                self._crossbar.fire(trigger);
            }
            try {
                while (!self._stopped &&
                    !self._entryQueue.isEmpty()) {
                    // Are we too far behind? Just tell our observers that they need to
                    // repoll, and drop our queue.
                    if (self._entryQueue.length > TOO_FAR_BEHIND) {
                        var lastEntry = self._entryQueue.pop();
                        self._entryQueue.clear();
                        self._onSkippedEntriesHook.each(function (callback) {
                            callback();
                            return true;
                        });
                        // Free any waitUntilCaughtUp() calls that were waiting for us to
                        // pass something that we just skipped.
                        self._setLastProcessedTS(lastEntry.ts);
                        continue;
                    }
                    const doc = self._entryQueue.shift();
                    // Fire trigger(s) for this doc.
                    handleDoc(doc);
                    // Now that we've processed this operation, process pending
                    // sequencers.
                    if (doc.ts) {
                        self._setLastProcessedTS(doc.ts);
                    }
                    else {
                        throw Error("oplog entry without ts: " + (0, ejson_1.stringify)(doc));
                    }
                }
            }
            finally {
                self._workerActive = false;
            }
        });
    }
    /**
     * Record `ts` as the newest processed timestamp and release every
     * waitUntilCaughtUp() caller whose target timestamp has been reached.
     */
    _setLastProcessedTS(ts) {
        var self = this;
        self._lastProcessedTS = ts;
        while (self._catchingUpFutures.length > 0 && self._catchingUpFutures[0].ts.lessThanOrEqual(self._lastProcessedTS)) {
            var sequencer = self._catchingUpFutures.shift();
            // FIX: these entries hold plain {promise, resolve} pairs created in
            // waitUntilCaughtUp, not fiber Futures. The original code called
            // the nonexistent future.return(), which threw a TypeError and left
            // waiters hanging forever.
            sequencer.future.resolve();
        }
    }
    // Methods used in tests to dynamically change TOO_FAR_BEHIND.
    _defineTooFarBehind(value) {
        TOO_FAR_BEHIND = value;
    }
    _resetTooFarBehind() {
        // FIX: coerce the env var to a number, matching the module-level default.
        TOO_FAR_BEHIND = +process.env.METEOR_OPLOG_TOO_FAR_BEHIND || 2000;
    }
}
|
|
344
|
+
exports.OplogHandle = OplogHandle;
|
|
345
|
+
/**
 * Resolve after approximately `ms` milliseconds (setTimeout-based).
 * @param {number} ms - Delay in milliseconds.
 * @returns {Promise<void>}
 */
async function _sleepForMs(ms) {
    await new Promise((wake) => {
        setTimeout(wake, ms);
    });
}
|
|
348
|
+
exports._sleepForMs = _sleepForMs;
|
|
349
|
+
/**
 * Backslash-escape every regular-expression metacharacter in `s` so the
 * result matches the input text literally when used in a RegExp.
 * @param {string} s
 * @returns {string}
 */
function _escapeRegExp(s) {
    return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}
|
|
352
|
+
exports._escapeRegExp = _escapeRegExp;
|
|
@@ -0,0 +1,126 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
// Converter of the new MongoDB Oplog format (>=5.0) to the one that Meteor
|
|
3
|
+
// handles well, i.e., `$set` and `$unset`. The new format is completely new,
|
|
4
|
+
// and looks as follows:
|
|
5
|
+
//
|
|
6
|
+
// { $v: 2, diff: Diff }
|
|
7
|
+
//
|
|
8
|
+
// where `Diff` is a recursive structure:
|
|
9
|
+
//
|
|
10
|
+
// {
|
|
11
|
+
// // Nested updates (sometimes also represented with an s-field).
|
|
12
|
+
// // Example: `{ $set: { 'foo.bar': 1 } }`.
|
|
13
|
+
// i: { <key>: <value>, ... },
|
|
14
|
+
//
|
|
15
|
+
// // Top-level updates.
|
|
16
|
+
// // Example: `{ $set: { foo: { bar: 1 } } }`.
|
|
17
|
+
// u: { <key>: <value>, ... },
|
|
18
|
+
//
|
|
19
|
+
// // Unsets.
|
|
20
|
+
// // Example: `{ $unset: { foo: '' } }`.
|
|
21
|
+
// d: { <key>: false, ... },
|
|
22
|
+
//
|
|
23
|
+
// // Array operations.
|
|
24
|
+
// // Example: `{ $push: { foo: 'bar' } }`.
|
|
25
|
+
// s<key>: { a: true, u<index>: <value>, ... },
|
|
26
|
+
// ...
|
|
27
|
+
//
|
|
28
|
+
// // Nested operations (sometimes also represented in the `i` field).
|
|
29
|
+
// // Example: `{ $set: { 'foo.bar': 1 } }`.
|
|
30
|
+
// s<key>: Diff,
|
|
31
|
+
// ...
|
|
32
|
+
// }
|
|
33
|
+
//
|
|
34
|
+
// (all fields are optional).
|
|
35
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
36
|
+
exports.oplogV2V1Converter = void 0;
|
|
37
|
+
/**
 * Join a dotted-path prefix with a key; an empty prefix yields the bare key.
 * @param {string} prefix
 * @param {string} key
 * @returns {string}
 */
function join(prefix, key) {
    if (!prefix) {
        return key;
    }
    return `${prefix}.${key}`;
}
// A key inside an array-operator object is either the marker 'a' or an
// index entry 'u<digits>'.
const arrayOperatorKeyRegex = /^(a|u\d+)$/;
/**
 * @param {string} field
 * @returns {boolean} Whether `field` is a valid array-operator key.
 */
function isArrayOperatorKey(field) {
    return arrayOperatorKeyRegex.test(field);
}
/**
 * An array operator is an object with `a: true` whose every key is an
 * array-operator key ('a' or 'u<index>').
 * @param {Object} operator
 * @returns {boolean}
 */
function isArrayOperator(operator) {
    if (operator.a !== true) {
        return false;
    }
    return Object.keys(operator).every((field) => isArrayOperatorKey(field));
}
/**
 * Recursively flatten `source` into `target` under dotted keys rooted at
 * `prefix`. Arrays, primitives, null, and empty objects are stored as-is at
 * their dotted path; non-empty objects are descended into.
 * @param {Object} target - Mutated in place (e.g. a $set accumulator).
 * @param {*} source - Value to flatten.
 * @param {string} prefix - Dotted path written so far.
 */
function flattenObjectInto(target, source, prefix) {
    const isLeaf = source === null
        || typeof source !== 'object'
        || Array.isArray(source);
    if (isLeaf) {
        target[prefix] = source;
        return;
    }
    const entries = Object.entries(source);
    if (entries.length === 0) {
        // Preserve empty objects rather than dropping them.
        target[prefix] = source;
        return;
    }
    for (const [key, value] of entries) {
        flattenObjectInto(target, value, join(prefix, key));
    }
}
|
|
63
|
+
// Enable verbose tracing of the conversion when OPLOG_CONVERTER_DEBUG is set.
const logDebugMessages = !!process.env.OPLOG_CONVERTER_DEBUG;
/**
 * Recursively translate one level of a v2 oplog `diff` into `$set`/`$unset`
 * entries on `oplogEntry` (mutated in place), with all keys rooted at the
 * dotted path `prefix`.
 *
 * Handles the four diff field kinds: `d` (unsets), `i` (nested sets,
 * flattened), `u` (flat sets), and `s<key>` fields, which are either array
 * operators ({a: true, u<index>: value}) or nested sub-diffs.
 *
 * @param {Object} oplogEntry - Accumulator for $set/$unset; mutated.
 * @param {Object} diff - One level of the v2 diff structure.
 * @param {string} prefix - Dotted path leading to this level ('' at the root).
 */
function convertOplogDiff(oplogEntry, diff, prefix) {
    if (logDebugMessages) {
        console.log(`convertOplogDiff(${JSON.stringify(oplogEntry)}, ${JSON.stringify(diff)}, ${JSON.stringify(prefix)})`);
    }
    // Lazily create the $set / $unset accumulators only when first needed.
    const ensureSet = () => {
        if (oplogEntry.$set === null || oplogEntry.$set === undefined) {
            oplogEntry.$set = {};
        }
    };
    const ensureUnset = () => {
        if (oplogEntry.$unset === null || oplogEntry.$unset === undefined) {
            oplogEntry.$unset = {};
        }
    };
    for (const [diffKey, value] of Object.entries(diff)) {
        if (diffKey === 'd') {
            // Handle `$unset`s.
            ensureUnset();
            for (const key of Object.keys(value)) {
                oplogEntry.$unset[join(prefix, key)] = true;
            }
        }
        else if (diffKey === 'i') {
            // Handle (potentially) nested `$set`s.
            ensureSet();
            flattenObjectInto(oplogEntry.$set, value, prefix);
        }
        else if (diffKey === 'u') {
            // Handle flat `$set`s.
            ensureSet();
            for (const [key, fieldValue] of Object.entries(value)) {
                oplogEntry.$set[join(prefix, key)] = fieldValue;
            }
        }
        else {
            // Handle s-fields: 's<key>' is either an array operator or a
            // nested sub-diff for <key>.
            const key = diffKey.slice(1);
            if (isArrayOperator(value)) {
                for (const [position, item] of Object.entries(value)) {
                    if (position === 'a') {
                        continue; // marker field, not an index entry
                    }
                    // 'u<index>' -> '<prefix>.<key>.<index>'
                    const positionKey = join(join(prefix, key), position.slice(1));
                    if (item === null) {
                        ensureUnset();
                        oplogEntry.$unset[positionKey] = true;
                    }
                    else {
                        ensureSet();
                        oplogEntry.$set[positionKey] = item;
                    }
                }
            }
            else if (key) {
                // Nested object.
                convertOplogDiff(oplogEntry, value, join(prefix, key));
            }
        }
    }
}
|
|
117
|
+
/**
 * Convert a v2-format oplog update entry ({ $v: 2, diff: ... }, MongoDB 5.0+)
 * into the v1-style `$set`/`$unset` shape the rest of this package consumes.
 * Entries that are not v2, or that carry no diff, are returned unchanged.
 *
 * @param {Object} oplogEntry - Raw oplog update description.
 * @returns {Object} Either `oplogEntry` itself (pass-through) or a fresh
 *   `{ $v: 2, $set?, $unset? }` object.
 */
function oplogV2V1Converter(oplogEntry) {
    const isConvertible = oplogEntry.$v === 2 && !!oplogEntry.diff;
    if (!isConvertible) {
        // Pass-through v1 and (probably) invalid entries.
        return oplogEntry;
    }
    const convertedOplogEntry = { $v: 2 };
    convertOplogDiff(convertedOplogEntry, oplogEntry.diff, '');
    return convertedOplogEntry;
}
|
|
126
|
+
exports.oplogV2V1Converter = oplogV2V1Converter;
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.PollingObserveDriver = void 0;
|
|
4
|
+
const writefence_1 = require("../ddp/writefence");
|
|
5
|
+
const diff_1 = require("../diff-sequence/diff");
|
|
6
|
+
const synchronous_queue_1 = require("./synchronous-queue");
|
|
7
|
+
const observe_driver_utils_1 = require("./observe_driver_utils");
|
|
8
|
+
const id_map_1 = require("../id-map/id_map");
|
|
9
|
+
// Minimum spacing between polls triggered by write notifications (ms).
const POLLING_THROTTLE_MS = +process.env.METEOR_POLLING_THROTTLE_MS || 50;
// Unconditional background re-poll interval, for eventual consistency with
// writes that happen outside this process (ms).
const POLLING_INTERVAL_MS = +process.env.METEOR_POLLING_INTERVAL_MS || 10 * 1000;
|
|
11
|
+
class PollingObserveDriver {
|
|
12
|
+
constructor(options) {
|
|
13
|
+
var self = this;
|
|
14
|
+
self._cursorDescription = options.cursorDescription;
|
|
15
|
+
self._mongoHandle = options.mongoHandle;
|
|
16
|
+
self._ordered = options.ordered;
|
|
17
|
+
self._multiplexer = options.multiplexer;
|
|
18
|
+
self._stopCallbacks = [];
|
|
19
|
+
self._stopped = false;
|
|
20
|
+
// previous results snapshot. on each poll cycle, diffs against
|
|
21
|
+
// results drives the callbacks.
|
|
22
|
+
self._results = null;
|
|
23
|
+
// The number of _pollMongo calls that have been added to self._taskQueue but
|
|
24
|
+
// have not started running. Used to make sure we never schedule more than one
|
|
25
|
+
// _pollMongo (other than possibly the one that is currently running). It's
|
|
26
|
+
// also used by _suspendPolling to pretend there's a poll scheduled. Usually,
|
|
27
|
+
// it's either 0 (for "no polls scheduled other than maybe one currently
|
|
28
|
+
// running") or 1 (for "a poll scheduled that isn't running yet"), but it can
|
|
29
|
+
// also be 2 if incremented by _suspendPolling.
|
|
30
|
+
self._pollsScheduledButNotStarted = 0;
|
|
31
|
+
self._pendingWrites = []; // people to notify when polling completes
|
|
32
|
+
// Make sure to create a separately throttled function for each
|
|
33
|
+
// PollingObserveDriver object.
|
|
34
|
+
self._ensurePollIsScheduled = throttle(self._unthrottledEnsurePollIsScheduled, self._cursorDescription.options.pollingThrottleMs || POLLING_THROTTLE_MS /* ms */);
|
|
35
|
+
// XXX figure out if we still need a queue
|
|
36
|
+
self._taskQueue = new synchronous_queue_1._SynchronousQueue();
|
|
37
|
+
var listenersHandle = (0, observe_driver_utils_1.listenAll)(self._cursorDescription, function (notification) {
|
|
38
|
+
// When someone does a transaction that might affect us, schedule a poll
|
|
39
|
+
// of the database. If that transaction happens inside of a write fence,
|
|
40
|
+
// block the fence until we've polled and notified observers.
|
|
41
|
+
var fence = writefence_1._WriteFence._CurrentWriteFence;
|
|
42
|
+
if (fence)
|
|
43
|
+
self._pendingWrites.push(fence.beginWrite());
|
|
44
|
+
// Ensure a poll is scheduled... but if we already know that one is,
|
|
45
|
+
// don't hit the throttled _ensurePollIsScheduled function (which might
|
|
46
|
+
// lead to us calling it unnecessarily in <pollingThrottleMs> ms).
|
|
47
|
+
if (self._pollsScheduledButNotStarted === 0)
|
|
48
|
+
self._ensurePollIsScheduled();
|
|
49
|
+
});
|
|
50
|
+
self._stopCallbacks.push(function () { listenersHandle.stop(); });
|
|
51
|
+
// every once and a while, poll even if we don't think we're dirty, for
|
|
52
|
+
// eventual consistency with database writes from outside the Meteor
|
|
53
|
+
// universe.
|
|
54
|
+
var pollingInterval = self._cursorDescription.options.pollingIntervalMs || POLLING_INTERVAL_MS;
|
|
55
|
+
var intervalHandle = setInterval(() => self._ensurePollIsScheduled(), pollingInterval);
|
|
56
|
+
self._stopCallbacks.push(function () {
|
|
57
|
+
clearInterval(intervalHandle);
|
|
58
|
+
});
|
|
59
|
+
// Make sure we actually poll soon!
|
|
60
|
+
self._unthrottledEnsurePollIsScheduled();
|
|
61
|
+
}
|
|
62
|
+
;
|
|
63
|
+
// This is always called through _.throttle (except once at startup).
|
|
64
|
+
_unthrottledEnsurePollIsScheduled() {
|
|
65
|
+
var self = this;
|
|
66
|
+
if (self._pollsScheduledButNotStarted > 0)
|
|
67
|
+
return;
|
|
68
|
+
++self._pollsScheduledButNotStarted;
|
|
69
|
+
self._taskQueue.queueTask(async () => await self._pollMongo());
|
|
70
|
+
}
|
|
71
|
+
/**
 * Runs one polling cycle: re-executes the query against Mongo, diffs the
 * new result set against the previously cached one, and publishes the
 * changes through the multiplexer. Scheduled via the driver's task queue
 * (see _unthrottledEnsurePollIsScheduled), so cycles do not overlap.
 */
async _pollMongo() {
    var self = this;
    // This poll is now running; a later request may schedule a fresh one.
    --self._pollsScheduledButNotStarted;
    if (self._stopped)
        return;
    // `first` is true only until self._results is assigned at the end of a
    // successful cycle; it gates the halt-on-query-error path below and the
    // one-time multiplexer.ready() call.
    var first = false;
    var newResults;
    var oldResults = self._results;
    if (!oldResults) {
        first = true;
        // XXX maybe use OrderedDict instead?
        oldResults = self._ordered ? [] : new id_map_1.IdMap();
    }
    // Save the list of pending writes which this round will commit.
    var writesForCycle = self._pendingWrites;
    self._pendingWrites = [];
    // Get the new query results. (This yields.)
    // NOTE(review): only the selector is passed to find(); cursor options
    // such as sort/limit/projection from _cursorDescription.options appear
    // to be ignored here — confirm this is intentional.
    try {
        const cursor = self._mongoHandle.db.collection(self._cursorDescription.collectionName).find(self._cursorDescription.selector);
        if (!self._ordered) {
            // Unordered: key the documents by _id so the diff can match them up.
            newResults = new id_map_1.IdMap();
            await cursor.forEach((doc) => newResults.set(doc._id, doc));
        }
        else
            // Ordered: keep the array form so positional diffs work.
            newResults = await cursor.toArray();
    }
    catch (e) {
        if (first && typeof (e.code) === 'number') {
            // This is an error document sent to us by mongod, not a connection
            // error generated by the client. And we've never seen this query work
            // successfully. Probably it's a bad selector or something, so we should
            // NOT retry. Instead, we should halt the observe (which ends up calling
            // `stop` on us).
            self._multiplexer.queryError(new Error("Exception while polling query " +
                JSON.stringify(self._cursorDescription) + ": " + e.message));
            return;
        }
        // getRawObjects can throw if we're having trouble talking to the
        // database. That's fine --- we will repoll later anyway. But we should
        // make sure not to lose track of this cycle's writes: push them back
        // onto _pendingWrites so the next successful poll commits them.
        // (It also can throw if there's just something invalid about this query;
        // unfortunately the ObserveDriver API doesn't provide a good way to
        // "cancel" the observe from the inside in this case.
        Array.prototype.push.apply(self._pendingWrites, writesForCycle);
        console.warn("Exception while polling query " + JSON.stringify(self._cursorDescription), e);
        return;
    }
    // Run diffs.
    // This will trigger the callbacks via the multiplexer
    if (!self._stopped) {
        diff_1.DiffSequence.diffQueryChanges(self._ordered, oldResults, newResults, self._multiplexer);
    }
    // Signals the multiplexer to allow all observeChanges calls that share this
    // multiplexer to return. (This happens asynchronously, via the
    // multiplexer's queue.)
    if (first)
        self._multiplexer.ready();
    // Replace self._results atomically. (This assignment is what makes `first`
    // stay through on the next cycle, so we've waited until after we've
    // committed to ready-ing the multiplexer.)
    self._results = newResults;
    // Once the ObserveMultiplexer has processed everything we've done in this
    // round, mark all the writes which existed before this call as
    // commmitted. (If new writes have shown up in the meantime, there'll
    // already be another _pollMongo task scheduled.)
    self._multiplexer.onFlush(function () {
        for (const w of writesForCycle) {
            w.committed();
        }
    });
}
|
|
142
|
+
stop() {
|
|
143
|
+
var self = this;
|
|
144
|
+
self._stopped = true;
|
|
145
|
+
for (const c of self._stopCallbacks) {
|
|
146
|
+
c();
|
|
147
|
+
}
|
|
148
|
+
// Release any write fences that are waiting on us.
|
|
149
|
+
for (const w of self._pendingWrites) {
|
|
150
|
+
w.committed();
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
// CommonJS export of the driver class for consumers of this module.
exports.PollingObserveDriver = PollingObserveDriver;
|
|
155
|
+
/**
 * Returns a throttled version of `func` that, when invoked repeatedly,
 * actually calls `func` at most once per `wait` milliseconds (a local
 * port of Underscore's `_.throttle`, avoiding the underscore dependency).
 *
 * @param {Function} func - The function to throttle.
 * @param {number} wait - Minimum interval in milliseconds between calls.
 * @param {Object} [options] - `{leading: false}` disables the immediate
 *   leading-edge invocation; `{trailing: false}` disables the deferred
 *   trailing-edge invocation.
 * @returns {Function} Throttled wrapper. Also exposes `.cancel()`, which
 *   discards any pending trailing call and resets the timer state.
 */
function throttle(func, wait, options) {
    let timeout = null;
    let context = null;
    let args = null;
    let result;
    // Timestamp of the last actual invocation (0 = never invoked).
    let previous = 0;
    if (!options)
        options = {};
    // Trailing-edge invocation, run from setTimeout with the most recent
    // `this`/arguments seen during the throttle window.
    const later = function () {
        previous = options.leading === false ? 0 : Date.now();
        timeout = null;
        result = func.apply(context, args);
        if (!timeout)
            context = args = null;
    };
    const throttled = function () {
        const now = Date.now();
        // With {leading: false}, pretend the last call just happened so the
        // first invocation waits out a full `wait` period.
        if (!previous && options.leading === false)
            previous = now;
        const remaining = wait - (now - previous);
        context = this;
        args = arguments;
        if (remaining <= 0 || remaining > wait) {
            // Leading edge (or system clock moved backwards): invoke now.
            if (timeout) {
                clearTimeout(timeout);
                timeout = null;
            }
            previous = now;
            result = func.apply(context, args);
            if (!timeout)
                context = args = null;
        }
        else if (!timeout && options.trailing !== false) {
            // Inside the window with no timer pending: schedule a trailing
            // call for the end of the window.
            timeout = setTimeout(later, remaining);
        }
        // Note: returns the result of the *last actual* invocation, which
        // may be stale when the call was throttled.
        return result;
    };
    // Discard any pending trailing call and reset all internal state.
    throttled.cancel = function () {
        clearTimeout(timeout);
        previous = 0;
        timeout = context = args = null;
    };
    return throttled;
}
|