mongodb-livedata-server 0.0.14 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +75 -69
- package/dist/meteor/binary-heap/max_heap.d.ts +2 -2
- package/dist/meteor/binary-heap/max_heap.js +7 -7
- package/dist/meteor/binary-heap/min_max_heap.d.ts +1 -1
- package/dist/meteor/binary-heap/min_max_heap.js +3 -3
- package/dist/meteor/ddp/livedata_server.d.ts +3 -2
- package/dist/meteor/ddp/livedata_server.js +2 -1
- package/dist/meteor/ddp/session-collection-view.js +3 -4
- package/dist/meteor/ddp/session.d.ts +7 -3
- package/dist/meteor/ddp/session.js +26 -10
- package/dist/meteor/ddp/stream_server.js +1 -1
- package/dist/meteor/ddp/subscription.d.ts +3 -1
- package/dist/meteor/ddp/subscription.js +16 -0
- package/dist/meteor/ddp/utils.js +1 -1
- package/dist/meteor/diff-sequence/diff.d.ts +2 -3
- package/dist/meteor/mongo/caching_change_observer.d.ts +2 -3
- package/dist/meteor/mongo/caching_change_observer.js +6 -38
- package/dist/meteor/mongo/live_cursor.js +3 -0
- package/dist/meteor/mongo/observe_multiplexer.d.ts +3 -1
- package/dist/meteor/mongo/observe_multiplexer.js +14 -38
- package/dist/meteor/mongo/oplog-observe-driver.d.ts +4 -3
- package/dist/meteor/mongo/oplog-observe-driver.js +25 -26
- package/dist/meteor/mongo/polling_observe_driver.js +30 -4
- package/dist/meteor/ordered-dict/ordered_dict.d.ts +2 -2
- package/dist/meteor/ordered-dict/ordered_dict.js +2 -2
- package/package.json +1 -1
package/README.md
CHANGED

````diff
@@ -1,69 +1,75 @@
 MongoDB Live Data Server
 ========================
 
 This project is essentially a MongoDB live data driver (based either on polling or on Oplog tailing) combined with a DDP server, extracted
 out of [Meteor](https://github.com/meteor/meteor), with **Fibers** and **underscore** dependencies removed and the code converted to TypeScript.
 
 Live data is one of the root concepts of Meteor. Data is served over WebSockets via the DDP protocol and updated automatically whenever something changes in the database. Calling server methods over WebSocket is also supported.
 
 Using Meteor locks you into the Meteor ecosystem, which has some problems (mostly for historical reasons). Using live data as a separate npm package might be preferable in many scenarios. People who are trying to migrate away from Meteor might also find this package useful as an intermediate step.
 
 ### Installation
 
 ```
 npm i mongodb-livedata-server
 ```
 
 ### Usage
 
 As the most common example, this is how you can use livedata with Express.js:
 
 ```js
 const { DDPServer, LiveCursor, LiveMongoConnection } = require('mongodb-livedata-server')
 const express = require('express')
 const app = express()
 const port = 3000
 
 app.get('/', (req, res) => {
   res.send('Hello World!')
 })
 
 const httpServer = app.listen(port, () => {
   console.log(`Example app listening on port ${port}`)
 })
 
 const liveMongoConnection = new LiveMongoConnection(process.env.MONGO_URL, {
   oplogUrl: process.env.MONGO_OPLOG_URL
 });
 const liveDataServer = new DDPServer({}, httpServer);
 
 liveDataServer.methods({
   "test-method": async (msg) => {
     console.log("Test msg: ", msg);
     return "hello! Current timestamp is: " + Date.now()
   }
 })
 
 liveDataServer.publish({
   "test-subscription": async () => {
     return new LiveCursor(liveMongoConnection, "test-collection", { category: "apples" });
   }
 })
 ```
 
 `liveDataServer.methods` and `liveDataServer.publish` have exactly the same interface as [Meteor.methods](https://docs.meteor.com/api/methods.html#Meteor-methods) and [Meteor.publish](https://docs.meteor.com/api/pubsub.html#Meteor-publish) respectively; note, however, that when publishing subscriptions you must use `LiveCursor` rather than a normal MongoDB cursor.
 
 ### Important notes
 
 - The project is in alpha. Use at your own risk.
 - Neither the method context nor the subscription context has the `unblock` method anymore (because this package doesn't use Fibers).
 - Meteor syntax for MongoDB queries is not supported. Please always use MongoDB Node.js driver syntax. For example, instead of
 ```ts
 const doc = myCollection.findOne(id);
 ```
 use
 ```ts
 const doc = await myCollection.findOne({ _id: id });
 ```
 - Neither MongoDB.ObjectId nor its Meteor.js alternative is supported at the moment. String ids only.
+
+### DDP Extension
+
+- Starting from 0.1.0, this library extends DDP with an `init` message, which is used to avoid the initial spam of `added` messages.
+- Starting from 0.1.1, the `init` message is only sent if version `1a` of the DDP protocol is specified. Additionally, when version `1a`
+is specified, the server will not send removes for all documents when stopping a subscription, relying on the client for the cleanup instead.
````
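For clients, the `1a` negotiation and the batched `init` look roughly like the sketch below. The message shapes follow the `dist` changes in this diff (`sendInitialAdds` in `session.js`, `SUPPORTED_DDP_VERSIONS` in `utils.js`); the WebSocket endpoint and the client-side handling are illustrative assumptions, not part of the package API.

```ts
// Hypothetical raw-DDP client: negotiate version "1a" and consume the
// batched "init" message instead of per-document "added" messages.
import WebSocket from "ws";

const ws = new WebSocket("ws://localhost:3000/sockjs/websocket"); // assumed endpoint

ws.on("open", () => {
  // "1a" must be requested explicitly; otherwise the server behaves as plain DDP 1.
  ws.send(JSON.stringify({ msg: "connect", version: "1a", support: ["1a", "1"] }));
  ws.send(JSON.stringify({ msg: "sub", id: "sub1", name: "test-subscription", params: [] }));
});

ws.on("message", (raw) => {
  const msg = JSON.parse(raw.toString());
  if (msg.msg === "init") {
    // One frame carrying all initial documents (each item includes its _id),
    // instead of one "added" frame per document.
    console.log(msg.collection, msg.items.length, "initial docs");
  }
  if (msg.msg === "nosub") {
    // Under "1a" the server no longer sends a "removed" flood on stop;
    // the client is expected to drop the subscription's documents itself.
  }
});
```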
package/dist/meteor/binary-heap/max_heap.d.ts
CHANGED

```diff
@@ -18,12 +18,12 @@ export declare class MaxHeap {
     _swap(idxA: any, idxB: any): void;
     get(id: any): any;
     set(id: any, value: any): void;
-    remove(id: any): void;
+    delete(id: any): void;
     has(id: any): any;
    empty(): boolean;
    clear(): void;
    forEach(iterator: any): void;
-    size(): number;
+    get size(): number;
    setDefault(id: any, def: any): any;
    clone(): MaxHeap;
    maxElementId(): any;
```
package/dist/meteor/binary-heap/max_heap.js
CHANGED

```diff
@@ -51,14 +51,14 @@ class MaxHeap {
         }
     }
     _downHeap(idx) {
-        while (leftChildIdx(idx) < this.size()) {
+        while (leftChildIdx(idx) < this.size) {
             const left = leftChildIdx(idx);
             const right = rightChildIdx(idx);
             let largest = idx;
-            if (left < this.size()) {
+            if (left < this.size) {
                 largest = this._maxIndex(largest, left);
             }
-            if (right < this.size()) {
+            if (right < this.size) {
                 largest = this._maxIndex(largest, right);
             }
             if (largest === idx) {
@@ -121,7 +121,7 @@ class MaxHeap {
             this._upHeap(this._heap.length - 1);
         }
     }
-    remove(id) {
+    delete(id) {
         if (this.has(id)) {
             const last = this._heap.length - 1;
             const idx = this._heapIdx.get(id);
@@ -143,7 +143,7 @@ class MaxHeap {
         return this._heapIdx.has(id);
     }
     empty() {
-        return this.size() === 0;
+        return this.size === 0;
     }
     clear() {
         this._heap = [];
@@ -153,7 +153,7 @@ class MaxHeap {
     forEach(iterator) {
         this._heap.forEach(obj => iterator(obj.value, obj.id));
     }
-    size() {
+    get size() {
         return this._heap.length;
     }
     setDefault(id, def) {
@@ -168,7 +168,7 @@ class MaxHeap {
         return clone;
     }
     maxElementId() {
-        return this.size() > 0 ? this._heap[0].id : null;
+        return this.size > 0 ? this._heap[0].id : null;
     }
     _selfCheck() {
         for (let i = 1; i < this._heap.length; i++) {
```
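Note that this release changes `size()` from a method to a getter on the heap classes and on `OrderedDict` (see the `ordered-dict` diffs below), matching the `Map#size` property, and apparently renames `remove` to `delete` for the same reason. Any downstream code touching these internals must adapt; a minimal before/after sketch with a hypothetical caller (the deep import path is an assumption):

```ts
import { MaxHeap } from "mongodb-livedata-server/dist/meteor/binary-heap/max_heap"; // assumed path

const heap = new MaxHeap((a: number, b: number) => a - b);
heap.set("doc1", 42);

// 0.0.14: const n = heap.size();  // size was a method
const n = heap.size;   // 0.1.1: size is a getter, like Map#size
heap.delete("doc1");   // 0.1.1: delete(), previously remove()
```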
package/dist/meteor/binary-heap/min_max_heap.d.ts
CHANGED

```diff
@@ -3,7 +3,7 @@ export declare class MinMaxHeap extends MaxHeap {
     private _minHeap;
     constructor(comparator: any, options?: MaxHeapOptions);
     set(id: string, value: any): void;
-    remove(id: string): void;
+    delete(id: string): void;
     clear(): void;
     setDefault(id: string, def: any): any;
     clone(): MinMaxHeap;
```
package/dist/meteor/binary-heap/min_max_heap.js
CHANGED

```diff
@@ -24,9 +24,9 @@ class MinMaxHeap extends max_heap_1.MaxHeap {
         super.set(id, value);
         this._minHeap.set(id, value);
     }
-    remove(id) {
-        super.remove(id);
-        this._minHeap.remove(id);
+    delete(id) {
+        super.delete(id);
+        this._minHeap.delete(id);
     }
     clear() {
         super.clear();
```
package/dist/meteor/ddp/livedata_server.d.ts
CHANGED

```diff
@@ -3,6 +3,7 @@ import { MethodInvocation } from "./method-invocation";
 import { StreamServerSocket } from "./stream_server";
 import { DDPSession, SessionConnectionHandle } from "./session";
 import { Server } from "http";
+import { Hook } from "../callback-hook/hook";
 import { Subscription } from "./subscription";
 export declare const DDP: {
     _CurrentPublicationInvocation: Subscription;
@@ -17,9 +18,9 @@ interface PublicationStrategy {
 export declare class DDPServer {
     private options;
     private onConnectionHook;
-    private onMessageHook;
+    onMessageHook: Hook;
     private publish_handlers;
-    private universal_publish_handlers;
+    universal_publish_handlers: any[];
     private method_handlers;
     private _publicationStrategies;
     private sessions;
```
package/dist/meteor/ddp/livedata_server.js
CHANGED

```diff
@@ -107,7 +107,7 @@ class DDPServer {
      */
     setPublicationStrategy(publicationName, strategy) {
         if (!Object.values(DDPServer.publicationStrategies).includes(strategy)) {
-            throw new Error(`Invalid merge strategy: ${strategy} 
+            throw new Error(`Invalid merge strategy: ${strategy}
         for collection ${publicationName}`);
         }
         this._publicationStrategies[publicationName] = strategy;
@@ -370,6 +370,7 @@ function wrapInternalException(exception, context) {
         if (exception.sanitizedError.isClientSafe)
             return exception.sanitizedError;
     }
+    console.error("Error " + context + ":", exception);
     return ddpError(500, "Internal server error");
 }
 exports.wrapInternalException = wrapInternalException;
```
package/dist/meteor/ddp/session-collection-view.js
CHANGED

```diff
@@ -3,7 +3,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.SessionCollectionView = void 0;
 const diff_1 = require("../diff-sequence/diff");
 const ejson_1 = require("../ejson/ejson");
-const id_map_1 = require("../id-map/id_map");
 const session_document_view_1 = require("./session-document-view");
 /**
  * Represents a client's view of a single collection
@@ -15,10 +14,10 @@ class SessionCollectionView {
     constructor(collectionName, callbacks) {
         this.collectionName = collectionName;
         this.callbacks = callbacks;
-        this.documents = new id_map_1.IdMap();
+        this.documents = new Map();
     }
     isEmpty() {
-        return this.documents.empty();
+        return this.documents.size === 0;
     }
     diff(previous) {
         diff_1.DiffSequence.diffMaps(previous.documents, this.documents, {
@@ -91,7 +90,7 @@ class SessionCollectionView {
         if (docView.existsIn.size === 0) {
             // it is gone from everyone
             self.callbacks.removed(self.collectionName, id);
-            self.documents.remove(id);
+            self.documents.delete(id);
         }
         else {
             var changed = {};
```
package/dist/meteor/ddp/session.d.ts
CHANGED

```diff
@@ -1,7 +1,9 @@
 import DoubleEndedQueue from "double-ended-queue";
 import { StreamServerSocket } from "./stream_server";
 import { DDPServer } from "./livedata_server";
+import { SessionCollectionView } from "./session-collection-view";
 import { SubscriptionHandle } from "./subscription";
+import { OrderedDict } from "../ordered-dict/ordered_dict";
 export interface SessionConnectionHandle {
     id: string;
     close: Function;
@@ -20,13 +22,13 @@ interface DDPMessage {
 }
 export declare class DDPSession {
     id: string;
-    server: any;
+    server: DDPServer;
     inQueue: DoubleEndedQueue<any>;
     userId: string | null;
     connectionHandle: SessionConnectionHandle;
     _dontStartNewUniversalSubs: boolean;
     _socketUrl: string;
-    private version;
+    version: string;
     private socket;
     private initialized;
     private workerRunning;
@@ -41,6 +43,7 @@ export declare class DDPSession {
     constructor(server: DDPServer, version: string, socket: StreamServerSocket, options: any);
     sendReady(subscriptionIds: string[]): void;
     _canSend(collectionName: any): boolean;
+    sendInitialAdds(collectionName: string, docs: Map<string, any> | OrderedDict): void;
     sendAdded(collectionName: string, id: string, fields: Record<string, any>): void;
     sendChanged(collectionName: string, id: string, fields: Record<string, any>): void;
     sendRemoved(collectionName: string, id: string): void;
@@ -49,7 +52,8 @@ export declare class DDPSession {
         changed: any;
         removed: any;
     };
-    getCollectionView(collectionName: string): any;
+    getCollectionView(collectionName: string): SessionCollectionView;
+    initialAdds(subscriptionHandle: SubscriptionHandle, collectionName: string, docs: Map<string, any> | OrderedDict): void;
     added(subscriptionHandle: SubscriptionHandle, collectionName: string, id: string, fields: Record<string, any>): void;
     removed(subscriptionHandle: SubscriptionHandle, collectionName: string, id: string): void;
     changed(subscriptionHandle: SubscriptionHandle, collectionName: string, id: string, fields: Record<string, any>): void;
```
package/dist/meteor/ddp/session.js
CHANGED

```diff
@@ -14,7 +14,6 @@ const utils_1 = require("./utils");
 const diff_1 = require("../diff-sequence/diff");
 const session_collection_view_1 = require("./session-collection-view");
 const subscription_1 = require("./subscription");
-const id_map_1 = require("../id-map/id_map");
 class DDPSession {
     constructor(server, version, socket, options) {
         this.protocol_handlers = {
@@ -161,7 +160,7 @@ class DDPSession {
         self._namedSubs = new Map();
         self._universalSubs = [];
         self.userId = null;
-        self.collectionViews = new id_map_1.IdMap();
+        self.collectionViews = new Map();
         // Set this to false to not send messages when collectionViews are
         // modified. This is done when rerunning subs in _setUserId and those messages
         // are calculated via a diff instead.
@@ -230,6 +229,13 @@ class DDPSession {
     _canSend(collectionName) {
         return this._isSending || !this.server.getPublicationStrategy(collectionName).useCollectionView;
     }
+    sendInitialAdds(collectionName, docs) {
+        if (this._canSend(collectionName)) {
+            const items = [];
+            docs.forEach(doc => items.push(doc));
+            this.send({ msg: "init", collection: collectionName, items });
+        }
+    }
     sendAdded(collectionName, id, fields) {
         if (this._canSend(collectionName))
             this.send({ msg: "added", collection: collectionName, id, fields });
@@ -266,6 +272,15 @@ class DDPSession {
         }
         return ret;
     }
+    initialAdds(subscriptionHandle, collectionName, docs) {
+        if (this.server.getPublicationStrategy(collectionName).useCollectionView) {
+            const view = this.getCollectionView(collectionName);
+            docs.forEach((doc, id) => view.added(subscriptionHandle, id, doc));
+        }
+        else {
+            this.sendInitialAdds(collectionName, docs);
+        }
+    }
     added(subscriptionHandle, collectionName, id, fields) {
         if (this.server.getPublicationStrategy(collectionName).useCollectionView) {
             const view = this.getCollectionView(collectionName);
@@ -280,7 +295,7 @@ class DDPSession {
             const view = this.getCollectionView(collectionName);
             view.removed(subscriptionHandle, id);
             if (view.isEmpty()) {
-                this.collectionViews.remove(collectionName);
+                this.collectionViews.delete(collectionName);
             }
         }
         else {
@@ -316,7 +331,7 @@ class DDPSession {
             return;
         // Drop the merge box data immediately.
         self.inQueue = null;
-        self.collectionViews = new id_map_1.IdMap();
+        self.collectionViews = new Map();
         if (self.heartbeat) {
             self.heartbeat.stop();
             self.heartbeat = null;
@@ -457,7 +472,7 @@ class DDPSession {
         // update the userId.
         self._isSending = false;
         var beforeCVs = self.collectionViews;
-        self.collectionViews = new id_map_1.IdMap();
+        self.collectionViews = new Map();
         self.userId = userId;
         // _setUserId is normally called from a Meteor method with
         // DDP._CurrentMethodInvocation set. But DDP._CurrentMethodInvocation is not
@@ -507,15 +522,16 @@ class DDPSession {
     }
     // Tear down specified subscription
     _stopSubscription(subId, error) {
-        var self = this;
         var subName = null;
         if (subId) {
-            var maybeSub = self._namedSubs.get(subId);
+            var maybeSub = this._namedSubs.get(subId);
             if (maybeSub) {
                 subName = maybeSub._name;
-                maybeSub._removeAllDocuments();
+                // version 1a doesn't send document deletions and relies on the clients for cleanup
+                if (this.version !== "1a")
+                    maybeSub._removeAllDocuments();
                 maybeSub._deactivate();
-                self._namedSubs.delete(subId);
+                this._namedSubs.delete(subId);
             }
         }
         var response = { msg: 'nosub', id: subId, error: undefined };
@@ -523,7 +539,7 @@ class DDPSession {
             response.error = (0, livedata_server_1.wrapInternalException)(error, subName ? ("from sub " + subName + " id " + subId)
                 : ("from sub id " + subId));
         }
-        self.send(response);
+        this.send(response);
     }
     // Tear down all subscriptions. Note that this does NOT send removed or nosub
     // messages, since we assume the client is gone.
```
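Taken together with the `subscription.js` change below, the effect on the wire is that a `1a` session receives a subscription's initial result set as a single frame. A sketch of the frame produced by `sendInitialAdds` above (field values are example data):

```ts
// What sendInitialAdds() puts on the wire for a "1a" session.
// Each item is a full cached document, including its _id.
const initFrame = {
  msg: "init",
  collection: "test-collection",
  items: [
    { _id: "a1", category: "apples", qty: 3 },  // example documents
    { _id: "a2", category: "apples", qty: 7 },
  ],
};
// A pre-1a session would instead receive one frame per document:
// { msg: "added", collection: "test-collection", id: "a1", fields: { category: "apples", qty: 3 } }
```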
package/dist/meteor/ddp/stream_server.js
CHANGED

```diff
@@ -48,7 +48,7 @@ class StreamServer {
         }
         else {
             serverOptions.faye_server_options = {
-                extensions: [permessage_deflate_1.default.configure({})]
+                extensions: [permessage_deflate_1.default.configure({ maxWindowBits: 11, memLevel: 4 })]
             };
         }
         this.server = sockjs_1.default.createServer(serverOptions);
```
package/dist/meteor/ddp/subscription.d.ts
CHANGED

```diff
@@ -1,3 +1,4 @@
+import { OrderedDict } from "../ordered-dict/ordered_dict";
 import { AsyncFunction } from "../types";
 import { DDPSession, SessionConnectionHandle } from "./session";
 export type SubscriptionHandle = `N${string}` | `U${string}`;
@@ -13,7 +14,7 @@ export declare class Subscription {
     private _handler;
     private _subscriptionId;
     private _params;
-    private _name;
+    _name?: string;
     connection: SessionConnectionHandle;
     private _subscriptionHandle;
     private _deactivated;
@@ -53,6 +54,7 @@ export declare class Subscription {
      */
     onStop(func: () => void): void;
     _isDeactivated(): boolean;
+    initialAdds(collectionName: string, documents: Map<string, any> | OrderedDict): void;
     /**
      * @summary Call inside the publish function. Informs the subscriber that a document has been added to the record set.
      * @locus Server
```
package/dist/meteor/ddp/subscription.js
CHANGED

```diff
@@ -276,6 +276,22 @@ class Subscription {
         var self = this;
         return self._deactivated || self._session.inQueue === null;
     }
+    initialAdds(collectionName, documents) {
+        if (this._isDeactivated())
+            return;
+        if (this._session.server.getPublicationStrategy(collectionName).doAccountingForCollection) {
+            let ids = this._documents.get(collectionName);
+            if (ids == null) {
+                ids = new Set();
+                this._documents.set(collectionName, ids);
+            }
+            documents.forEach((_doc, id) => ids.add(id));
+        }
+        if (this._session.version === "1a")
+            this._session.initialAdds(this._subscriptionHandle, collectionName, documents);
+        else
+            documents.forEach((doc, id) => this._session.added(this._subscriptionHandle, collectionName, id, doc));
+    }
     /**
      * @summary Call inside the publish function. Informs the subscriber that a document has been added to the record set.
      * @locus Server
```
package/dist/meteor/ddp/utils.js
CHANGED

```diff
@@ -34,7 +34,7 @@ function last(array, n, guard) {
     return exports.slice.call(array, Math.max(array.length - n, 0));
 }
 exports.last = last;
-exports.SUPPORTED_DDP_VERSIONS = ['1', 'pre2', 'pre1'];
+exports.SUPPORTED_DDP_VERSIONS = ['1a', '1', 'pre2', 'pre1'];
 function parseDDP(stringMessage) {
     try {
         var msg = JSON.parse(stringMessage);
```
package/dist/meteor/diff-sequence/diff.d.ts
CHANGED

```diff
@@ -1,4 +1,3 @@
-import { IdMap } from "../id-map/id_map";
 interface DiffCallbacks {
     both: (key: string, left: any, right: any) => void;
     leftOnly: (key: string, value: any) => void;
@@ -6,11 +5,11 @@ interface DiffCallbacks {
 }
 interface DiffSequence {
     diffQueryChanges(ordered: boolean, oldResults: any[], newResults: any[], observer: any, options?: any): any;
-    diffQueryChanges(ordered: boolean, oldResults: IdMap, newResults: IdMap, observer: any, options?: any): any;
+    diffQueryChanges(ordered: boolean, oldResults: Map<string, any>, newResults: Map<string, any>, observer: any, options?: any): any;
     diffQueryUnorderedChanges(oldResults: any, newResults: any, observer: any, options?: any): any;
     diffQueryOrderedChanges(old_results: any, new_results: any, observer: any, options?: any): any;
     diffObjects(left: Record<string, any>, right: Record<string, any>, callbacks: DiffCallbacks): any;
-    diffMaps(left: IdMap, right: IdMap, callbacks: DiffCallbacks): any;
+    diffMaps(left: Map<string, any>, right: Map<string, any>, callbacks: DiffCallbacks): any;
     makeChangedFields(newDoc: Record<string, any>, oldDoc: Record<string, any>): any;
     applyChanges(doc: Record<string, any>, changeFields: Record<string, any>): void;
 }
```
package/dist/meteor/mongo/caching_change_observer.d.ts
CHANGED

```diff
@@ -1,8 +1,8 @@
-import { IdMap } from "../id-map/id_map";
 import { OrderedDict } from "../ordered-dict/ordered_dict";
 export declare class _CachingChangeObserver {
-    docs: OrderedDict | IdMap;
+    docs: OrderedDict | Map<string, any>;
     applyChange: {
+        initialAdds?: (docs: OrderedDict | Map<string, any>) => void;
         added?: (id: string, fields: any) => void;
         changed?: (id: string, fields: any) => void;
         removed?: (id: string) => void;
@@ -11,7 +11,6 @@ export declare class _CachingChangeObserver {
     };
     private ordered;
     constructor(options?: {
-        callbacks?: any;
         ordered?: boolean;
     });
 }
```
package/dist/meteor/mongo/caching_change_observer.js
CHANGED

```diff
@@ -8,27 +8,11 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports._CachingChangeObserver = void 0;
 const diff_1 = require("../diff-sequence/diff");
-const ejson_1 = require("../ejson/ejson");
-const id_map_1 = require("../id-map/id_map");
 const ordered_dict_1 = require("../ordered-dict/ordered_dict");
 // available as `this` to those callbacks.
 class _CachingChangeObserver {
     constructor(options = {}) {
-        const orderedFromCallbacks = (options.callbacks &&
-            !!(options.callbacks.addedBefore || options.callbacks.movedBefore));
-        if (options.hasOwnProperty('ordered')) {
-            this.ordered = options.ordered;
-            if (options.callbacks && options.ordered !== orderedFromCallbacks) {
-                throw Error('ordered option doesn\'t match callbacks');
-            }
-        }
-        else if (options.callbacks) {
-            this.ordered = orderedFromCallbacks;
-        }
-        else {
-            throw Error('must provide ordered or callbacks');
-        }
-        const callbacks = options.callbacks || {};
+        this.ordered = options.ordered || false;
         if (this.ordered) {
             this.docs = new ordered_dict_1.OrderedDict();
             this.applyChange = {
@@ -36,13 +20,6 @@ class _CachingChangeObserver {
                     // Take a shallow copy since the top-level properties can be changed
                     const doc = Object.assign({}, fields);
                     doc._id = id;
-                    if (callbacks.addedBefore) {
-                        callbacks.addedBefore.call(this, id, (0, ejson_1.clone)(fields), before);
-                    }
-                    // This line triggers if we provide added with movedBefore.
-                    if (callbacks.added) {
-                        callbacks.added.call(this, id, (0, ejson_1.clone)(fields));
-                    }
                     // XXX could `before` be a falsy ID? Technically
                     // idStringify seems to allow for them -- though
                     // OrderedDict won't call stringify on a falsy arg.
@@ -50,27 +27,24 @@ class _CachingChangeObserver {
                 },
                 movedBefore: (id, before) => {
                     const doc = this.docs.get(id);
-                    if (callbacks.movedBefore) {
-                        callbacks.movedBefore.call(this, id, before);
-                    }
                     this.docs.moveBefore(id, before || null);
                 },
             };
         }
         else {
-            this.docs = new id_map_1.IdMap();
+            this.docs = new Map();
             this.applyChange = {
                 added: (id, fields) => {
                     // Take a shallow copy since the top-level properties can be changed
                     const doc = Object.assign({}, fields);
-                    if (callbacks.added) {
-                        callbacks.added.call(this, id, (0, ejson_1.clone)(fields));
-                    }
                     doc._id = id;
                     this.docs.set(id, doc);
                 },
             };
         }
+        this.applyChange.initialAdds = (docs) => {
+            this.docs = docs;
+        };
         // The methods in _IdMap and OrderedDict used by these callbacks are
         // identical.
         this.applyChange.changed = (id, fields) => {
@@ -78,16 +52,10 @@ class _CachingChangeObserver {
             if (!doc) {
                 throw new Error(`Unknown id for changed: ${id}`);
             }
-            if (callbacks.changed) {
-                callbacks.changed.call(this, id, (0, ejson_1.clone)(fields));
-            }
             diff_1.DiffSequence.applyChanges(doc, fields);
         };
         this.applyChange.removed = id => {
-            if (callbacks.removed) {
-                callbacks.removed.call(this, id);
-            }
-            this.docs.remove(id);
+            this.docs.delete(id);
         };
     }
 }
```
package/dist/meteor/mongo/live_cursor.js
CHANGED

```diff
@@ -19,6 +19,9 @@ class LiveCursor {
     }
     async _publishCursor(sub) {
         const observeHandle = await this.mongo._observeChanges(this.cursorDescription, false, {
+            initialAdds: (docs) => {
+                sub.initialAdds(this.cursorDescription.collectionName, docs);
+            },
             added: (id, fields) => {
                 sub.added(this.cursorDescription.collectionName, id, fields);
             },
```
package/dist/meteor/mongo/observe_multiplexer.d.ts
CHANGED

```diff
@@ -1,9 +1,11 @@
+import { OrderedDict } from "../ordered-dict/ordered_dict";
 export interface ObserveCallbacks {
     added: (id: string, fields: Record<string, any>) => void;
     changed: (id: string, fields: Record<string, any>) => void;
     removed: (id: string) => void;
     addedBefore?: (id: string, fields: Record<string, any>, before?: any) => void;
     movedBefore?: (id: string, fields: Record<string, any>, before?: any) => void;
+    initialAdds: (docs: Map<string, any> | OrderedDict) => void;
     _testOnlyPollCallback?: any;
 }
 export declare class ObserveMultiplexer {
@@ -27,12 +29,12 @@ export declare class ObserveMultiplexer {
     callbackNames(): string[];
     _ready(): boolean;
     _applyCallback(callbackName: string, args: any): Promise<void>;
-    _sendAdds(handle: ObserveHandle): void;
 }
 export declare class ObserveHandle {
     private _multiplexer;
     nonMutatingCallbacks: boolean;
     _id: number;
+    _initialAdds: ObserveCallbacks["initialAdds"];
     _addedBefore: ObserveCallbacks["addedBefore"];
     _movedBefore: ObserveCallbacks["movedBefore"];
     _added: ObserveCallbacks["added"];
```
package/dist/meteor/mongo/observe_multiplexer.js
CHANGED

```diff
@@ -1,15 +1,4 @@
 "use strict";
-var __rest = (this && this.__rest) || function (s, e) {
-    var t = {};
-    for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)
-        t[p] = s[p];
-    if (s != null && typeof Object.getOwnPropertySymbols === "function")
-        for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {
-            if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))
-                t[p[i]] = s[p[i]];
-        }
-    return t;
-};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ObserveHandle = exports.ObserveMultiplexer = void 0;
 const ejson_1 = require("../ejson/ejson");
@@ -51,9 +40,8 @@ class ObserveMultiplexer {
         ++self._addHandleTasksScheduledButNotPerformed;
         await self._queue.runTask(async () => {
             self._handles[handle._id] = handle;
-            // Send out whatever adds we have so far (whether or not we
-            // are ready).
-            self._sendAdds(handle);
+            if (this._ready() && this._cache.docs.size > 0)
+                handle._initialAdds(this._cache.docs);
             --self._addHandleTasksScheduledButNotPerformed;
         });
         // *outside* the task, since otherwise we'd deadlock
@@ -97,6 +85,13 @@ class ObserveMultiplexer {
         this._queue.queueTask(async () => {
             if (this._ready())
                 throw Error("can't make ObserveMultiplex ready twice!");
+            if (this._cache.docs.size > 0) {
+                for (const handleId of Object.keys(this._handles)) {
+                    var handle = this._handles && this._handles[handleId];
+                    if (handle)
+                        handle._initialAdds(this._cache.docs);
+                }
+            }
             this._readyFuture.resolve();
             this._readyFuture.isResolved = true;
         });
@@ -130,9 +125,9 @@ class ObserveMultiplexer {
     callbackNames() {
         var self = this;
         if (self._ordered)
-            return ["addedBefore", "changed", "movedBefore", "removed"];
+            return ["initialAdds", "addedBefore", "changed", "movedBefore", "removed"];
         else
-            return ["added", "changed", "removed"];
+            return ["initialAdds", "added", "changed", "removed"];
     }
     _ready() {
         return this._readyFuture.isResolved;
@@ -151,6 +146,9 @@ class ObserveMultiplexer {
             (callbackName !== 'added' && callbackName !== 'addedBefore')) {
             throw new Error("Got " + callbackName + " during initial adds");
         }
+        // don't actually send anything to the handles until initial adds are cached
+        if (!self._ready())
+            return;
         // Now multiplex the callbacks out to all observe handles. It's OK if
         // these calls yield; since we're inside a task, no other use of our queue
         // can continue until these are done. (But we do have to be careful to not
@@ -166,28 +164,6 @@ class ObserveMultiplexer {
             }
         });
     }
-    // Sends initial adds to a handle. It should only be called from within a task
-    // (the task that is processing the addHandleAndSendInitialAdds call). It
-    // synchronously invokes the handle's added or addedBefore; there's no need to
-    // flush the queue afterwards to ensure that the callbacks get out.
-    _sendAdds(handle) {
-        var self = this;
-        //if (self._queue.safeToRunTask())
-        //    throw Error("_sendAdds may only be called from within a task!");
-        var add = self._ordered ? handle._addedBefore : handle._added;
-        if (!add)
-            return;
-        // note: docs may be an _IdMap or an OrderedDict
-        self._cache.docs.forEach((doc, id) => {
-            if (!self._handles.hasOwnProperty(handle._id))
-                throw Error("handle got removed before sending initial adds!");
-            const _a = handle.nonMutatingCallbacks ? doc : (0, ejson_1.clone)(doc), { _id } = _a, fields = __rest(_a, ["_id"]);
-            if (self._ordered)
-                add(id, fields, null); // we're going in order, so add at end
-            else
-                add(id, fields);
-        });
-    }
 }
 exports.ObserveMultiplexer = ObserveMultiplexer;
 let nextObserveHandleId = 1;
```
package/dist/meteor/mongo/oplog-observe-driver.d.ts
CHANGED

```diff
@@ -3,7 +3,6 @@ import { MinimongoMatcher } from "./minimongo_matcher";
 import MinimongoSorter from "./minimongo_sorter";
 import { ObserveMultiplexer } from "./observe_multiplexer";
 import { CursorDescription } from "./live_cursor";
-import { IdMap } from "../id-map/id_map";
 import { SynchronousCursor } from "./synchronous-cursor";
 interface OplogObserveDriverOptions {
     cursorDescription: CursorDescription<any>;
@@ -53,13 +52,15 @@ export declare class OplogObserveDriver {
     _handleOplogEntrySteadyOrFetching(op: any): void;
     _runInitialQuery(): Promise<void>;
     _pollQuery(): void;
-    _runQuery(options?: any): Promise<void>;
+    _runQuery(options?: {
+        initial?: boolean;
+    }): Promise<void>;
     _needToPollQuery(): void;
     _doneQuerying(): Promise<void>;
     _cursorForQuery(optionsOverwrite: {
         limit: number;
     }): SynchronousCursor;
-    _publishNewResults(newResults: IdMap, newBuffer: IdMap): void;
+    _publishNewResults(newResults: Map<string, any>, newBuffer: Map<string, any>): void;
     stop(): void;
     _registerPhaseChange(phase: any): void;
     static cursorSupported(cursorDescription: CursorDescription<any>, matcher: MinimongoMatcher): boolean;
```
package/dist/meteor/mongo/oplog-observe-driver.js
CHANGED

```diff
@@ -11,7 +11,6 @@ const max_heap_1 = require("../binary-heap/max_heap");
 const minimongo_common_1 = require("./minimongo_common");
 const ejson_1 = require("../ejson/ejson");
 const oplog_tailing_1 = require("./oplog_tailing");
-const id_map_1 = require("../id-map/id_map");
 const synchronous_cursor_1 = require("./synchronous-cursor");
 var PHASE;
 (function (PHASE) {
@@ -77,7 +76,7 @@ class OplogObserveDriver {
         self._comparator = null;
         self._sorter = null;
         self._unpublishedBuffer = null;
-        self._published = new id_map_1.IdMap();
+        self._published = new Map();
     }
     // Indicates if it is safe to insert a new document at the end of the buffer
     // for this query. i.e. it is known that there are no documents matching the
@@ -187,11 +186,11 @@ class OplogObserveDriver {
     // (exceeding capacity specified by limit). If so, push the maximum
     // element to the buffer, we might want to save it in memory to reduce the
     // amount of Mongo lookups in the future.
-    if (self._limit && self._published.size() > self._limit) {
+    if (self._limit && self._published.size > self._limit) {
         // XXX in theory the size of published is no more than limit+1
-        if (self._published.size() !== self._limit + 1) {
+        if (self._published.size !== self._limit + 1) {
             throw new Error("After adding to published, " +
-                (self._published.size() - self._limit) +
+                (self._published.size - self._limit) +
                 " documents are overflowing the set");
         }
         var overflowingDocId = self._published.maxElementId();
@@ -199,7 +198,7 @@ class OplogObserveDriver {
         if ((0, ejson_1.equals)(overflowingDocId, id)) {
             throw new Error("The document just added is overflowing the published set");
         }
-        self._published.remove(overflowingDocId);
+        self._published.delete(overflowingDocId);
         self._multiplexer.removed(overflowingDocId);
         self._addBuffered(overflowingDocId, overflowingDoc);
     }
@@ -208,11 +207,11 @@ class OplogObserveDriver {
     _removePublished(id) {
         var self = this;
         //Meteor._noYieldsAllowed(function () {
-        self._published.remove(id);
+        self._published.delete(id);
         self._multiplexer.removed(id);
-        if (!self._limit || self._published.size() === self._limit)
+        if (!self._limit || self._published.size === self._limit)
             return;
-        if (self._published.size() > self._limit)
+        if (self._published.size > self._limit)
             throw Error("self._published got too big");
         // OK, we are publishing less than the limit. Maybe we should look in the
         // buffer to find the next element past what we were publishing before.
@@ -264,9 +263,9 @@ class OplogObserveDriver {
     //Meteor._noYieldsAllowed(function () {
     self._unpublishedBuffer.set(id, self._sharedProjectionFn(doc));
     // If something is overflowing the buffer, we just remove it from cache
-    if (self._unpublishedBuffer.size() > self._limit) {
+    if (self._unpublishedBuffer.size > self._limit) {
         var maxBufferedId = self._unpublishedBuffer.maxElementId();
-        self._unpublishedBuffer.remove(maxBufferedId);
+        self._unpublishedBuffer.delete(maxBufferedId);
         // Since something matching is removed from cache (both published set and
         // buffer), set flag to false
         self._safeAppendToBuffer = false;
@@ -278,11 +277,11 @@ class OplogObserveDriver {
     _removeBuffered(id) {
         var self = this;
         //Meteor._noYieldsAllowed(function () {
-        self._unpublishedBuffer.remove(id);
+        self._unpublishedBuffer.delete(id);
         // To keep the contract "buffer is never empty in STEADY phase unless the
         // everything matching fits into published" true, we poll everything as
         // soon as we see the buffer becoming empty.
-        if (!self._unpublishedBuffer.size() && !self._safeAppendToBuffer)
+        if (!self._unpublishedBuffer.size && !self._safeAppendToBuffer)
             self._needToPollQuery();
         //});
     }
@@ -299,22 +298,22 @@ class OplogObserveDriver {
         throw Error("tried to add something already existed in buffer " + id);
     var limit = self._limit;
     var comparator = self._comparator;
-    var maxPublished = (limit && self._published.size() > 0)
+    var maxPublished = (limit && self._published.size > 0)
         ? self._published.get(self._published.maxElementId()) // published is MaxHeap because limit is defined
         : null;
-    var maxBuffered = (limit && self._unpublishedBuffer.size() > 0)
+    var maxBuffered = (limit && self._unpublishedBuffer.size > 0)
         ? self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId())
        : null;
    // The query is unlimited or didn't publish enough documents yet or the
    // new document would fit into published set pushing the maximum element
    // out, then we need to publish the doc.
-    var toPublish = !limit || self._published.size() < limit ||
+    var toPublish = !limit || self._published.size < limit ||
        comparator(doc, maxPublished) < 0;
    // Otherwise we might need to buffer it (only in case of limited query).
    // Buffering is allowed if the buffer is not filled up yet and all
    // matching docs are either in the published set or in the buffer.
    var canAppendToBuffer = !toPublish && self._safeAppendToBuffer &&
-        self._unpublishedBuffer.size() < limit;
+        self._unpublishedBuffer.size < limit;
    // Or if it is small enough to be safely inserted to the middle or the
    // beginning of the buffer.
    var canInsertIntoBuffer = !toPublish && maxBuffered &&
@@ -364,7 +363,7 @@ class OplogObserveDriver {
     else if (cachedBefore && matchesNow) {
         var oldDoc = self._published.get(id);
         var comparator = self._comparator;
-        var minBuffered = self._limit && self._unpublishedBuffer.size() &&
+        var minBuffered = self._limit && self._unpublishedBuffer.size &&
             self._unpublishedBuffer.get(self._unpublishedBuffer.minElementId());
         var maxBuffered;
         if (publishedBefore) {
@@ -378,7 +377,7 @@ class OplogObserveDriver {
             // published. Notably, we don't want to schedule repoll and continue
             // relying on this property.
             var staysInPublished = !self._limit ||
-                self._unpublishedBuffer.size() === 0 ||
+                self._unpublishedBuffer.size === 0 ||
                 comparator(newDoc, minBuffered) <= 0;
             if (staysInPublished) {
                 self._changePublished(id, oldDoc, newDoc);
@@ -405,10 +404,10 @@ class OplogObserveDriver {
             // we don't trigger the querying immediately. if we end this block
             // with the buffer empty, we will need to trigger the query poll
             // manually too.
-            self._unpublishedBuffer.remove(id);
+            self._unpublishedBuffer.delete(id);
             // published is MaxHeap because bufferedBefore is only set when limit is defined
             var maxPublished = self._published.get(self._published.maxElementId());
-            maxBuffered = self._unpublishedBuffer.size() && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
+            maxBuffered = self._unpublishedBuffer.size && self._unpublishedBuffer.get(self._unpublishedBuffer.maxElementId());
             // the buffered doc was updated, it could move to published
             var toPublish = comparator(newDoc, maxPublished) < 0;
             // or stays in buffer even after the change
@@ -427,7 +426,7 @@ class OplogObserveDriver {
             self._safeAppendToBuffer = false;
             // Normally this check would have been done in _removeBuffered but
             // we didn't use it, so we need to do it ourself now.
-            if (!self._unpublishedBuffer.size()) {
+            if (!self._unpublishedBuffer.size) {
                 self._needToPollQuery();
             }
         }
@@ -667,8 +666,8 @@ class OplogObserveDriver {
     // If we've been stopped, we don't have to run anything any more.
     if (self._stopped)
         return;
-    newResults = new id_map_1.IdMap();
-    newBuffer = new id_map_1.IdMap();
+    newResults = new Map();
+    newBuffer = new Map();
     // Query 2x documents as the half excluded from the original query will go
     // into unpublished buffer to reduce additional Mongo lookups in cases
     // when documents are removed from the published set and need a
@@ -809,7 +808,7 @@ class OplogObserveDriver {
     // Sanity-check that everything we tried to put into _published ended up
     // there.
     // XXX if this is slow, remove it later
-    if (self._published.size() !== newResults.size()) {
+    if (self._published.size !== newResults.size) {
         console.error('The Mongo server and the Meteor query disagree on how ' +
             'many documents match your query. Cursor description: ', self._cursorDescription);
         throw Error("The Mongo server and the Meteor query disagree on how " +
@@ -825,7 +824,7 @@ class OplogObserveDriver {
     newBuffer.forEach(function (doc, id) {
         self._addBuffered(id, doc);
     });
-    self._safeAppendToBuffer = newBuffer.size() < self._limit;
+    self._safeAppendToBuffer = newBuffer.size < self._limit;
     //});
 }
 // This stop function is invoked from the onStop of the ObserveMultiplexer, so
```
package/dist/meteor/mongo/polling_observe_driver.js
CHANGED

```diff
@@ -1,11 +1,17 @@
 "use strict";
+var __asyncValues = (this && this.__asyncValues) || function (o) {
+    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
+    var m = o[Symbol.asyncIterator], i;
+    return m ? m.call(o) : (o = typeof __values === "function" ? __values(o) : o[Symbol.iterator](), i = {}, verb("next"), verb("throw"), verb("return"), i[Symbol.asyncIterator] = function () { return this; }, i);
+    function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }
+    function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }
+};
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.PollingObserveDriver = void 0;
 const writefence_1 = require("../ddp/writefence");
 const diff_1 = require("../diff-sequence/diff");
 const synchronous_queue_1 = require("./synchronous-queue");
 const observe_driver_utils_1 = require("./observe_driver_utils");
-const id_map_1 = require("../id-map/id_map");
 var POLLING_THROTTLE_MS = +process.env.METEOR_POLLING_THROTTLE_MS || 50;
 var POLLING_INTERVAL_MS = +process.env.METEOR_POLLING_INTERVAL_MS || 10 * 1000;
 class PollingObserveDriver {
@@ -69,6 +75,7 @@ class PollingObserveDriver {
         self._taskQueue.queueTask(async () => await self._pollMongo());
     }
     async _pollMongo() {
+        var _a, e_1, _b, _c;
         var self = this;
         --self._pollsScheduledButNotStarted;
         if (self._stopped)
@@ -79,7 +86,7 @@ class PollingObserveDriver {
         if (!oldResults) {
             first = true;
             // XXX maybe use OrderedDict instead?
-            oldResults = self._ordered ? [] : new id_map_1.IdMap();
+            oldResults = self._ordered ? [] : new Map();
         }
         // Save the list of pending writes which this round will commit.
         var writesForCycle = self._pendingWrites;
@@ -88,8 +95,27 @@ class PollingObserveDriver {
         try {
             const cursor = self._mongoHandle.db.collection(self._cursorDescription.collectionName).find(self._cursorDescription.selector);
             if (!self._ordered) {
-                newResults = new id_map_1.IdMap();
-                await cursor.forEach(doc => newResults.set(doc._id, doc));
+                newResults = new Map();
+                try {
+                    for (var _d = true, cursor_1 = __asyncValues(cursor), cursor_1_1; cursor_1_1 = await cursor_1.next(), _a = cursor_1_1.done, !_a;) {
+                        _c = cursor_1_1.value;
+                        _d = false;
+                        try {
+                            const doc = _c;
+                            newResults.set(doc._id, doc);
+                        }
+                        finally {
+                            _d = true;
+                        }
+                    }
+                }
+                catch (e_1_1) { e_1 = { error: e_1_1 }; }
+                finally {
+                    try {
+                        if (!_d && !_a && (_b = cursor_1.return)) await _b.call(cursor_1);
+                    }
+                    finally { if (e_1) throw e_1.error; }
+                }
             }
             else
                 newResults = await cursor.toArray();
```
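The `__asyncValues` boilerplate above is just tsc's down-leveled output for async iteration; the underlying TypeScript source (not shown in this diff, so this is a reconstruction) presumably streams the `find()` cursor with a plain `for await` loop instead of materializing an array:

```ts
import { MongoClient } from "mongodb";

// Probable pre-transpilation shape of the hunk above: stream the find()
// cursor into a Map keyed by _id, one document at a time.
async function fetchUnordered(client: MongoClient, collectionName: string, selector: object) {
  const newResults = new Map<string, any>();
  const cursor = client.db().collection(collectionName).find(selector);
  for await (const doc of cursor) {
    newResults.set(String(doc._id), doc); // this package uses string ids only
  }
  return newResults;
}
```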
package/dist/meteor/ordered-dict/ordered_dict.d.ts
CHANGED

```diff
@@ -10,12 +10,12 @@ export declare class OrderedDict {
     constructor(...args: any[]);
     _k(key: any): string;
     empty(): boolean;
-    size(): number;
+    get size(): number;
     _linkEltIn(elt: any): void;
     _linkEltOut(elt: any): void;
     putBefore(key: any, item: any, before: any): void;
     append(key: any, item: any): void;
-    remove(key: any): any;
+    delete(key: any): any;
     get(key: any): any;
     has(key: any): any;
     forEach(iter: (doc: any, key: string, index: number) => any, context?: any): void;
```
package/dist/meteor/ordered-dict/ordered_dict.js
CHANGED

```diff
@@ -38,7 +38,7 @@ class OrderedDict {
     empty() {
         return !this._first;
     }
-    size() {
+    get size() {
         return this._size;
     }
     _linkEltIn(elt) {
@@ -82,7 +82,7 @@ class OrderedDict {
     append(key, item) {
         this.putBefore(key, item, null);
     }
-    remove(key) {
+    delete(key) {
         var elt = this._dict[this._k(key)];
         if (typeof elt === "undefined")
             throw new Error("Item " + key + " not present in OrderedDict");
```
package/package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "mongodb-livedata-server",
-  "version": "0.0.14",
+  "version": "0.1.1",
   "description": "MongoDB live data server, extracted from Meteor, Fibers removed and converted to TypeScript",
   "main": "dist/livedata_server.js",
   "types": "dist/livedata_server.d.ts",
```