@scrypted/server 0.123.0 → 0.123.2
This diff reflects the published contents of these two package versions as they appear in their public registry, and is provided for informational purposes only.
- package/dist/ip.d.ts +2 -0
- package/dist/ip.js +5 -4
- package/dist/ip.js.map +1 -1
- package/dist/listen-zero.d.ts +5 -2
- package/dist/listen-zero.js +2 -1
- package/dist/listen-zero.js.map +1 -1
- package/dist/plugin/plugin-api.d.ts +9 -3
- package/dist/plugin/plugin-api.js +10 -1
- package/dist/plugin/plugin-api.js.map +1 -1
- package/dist/plugin/plugin-console.d.ts +2 -1
- package/dist/plugin/plugin-console.js +27 -6
- package/dist/plugin/plugin-console.js.map +1 -1
- package/dist/plugin/plugin-host-api.js +1 -1
- package/dist/plugin/plugin-host-api.js.map +1 -1
- package/dist/plugin/plugin-host.d.ts +4 -7
- package/dist/plugin/plugin-host.js +76 -62
- package/dist/plugin/plugin-host.js.map +1 -1
- package/dist/plugin/plugin-lazy-remote.d.ts +3 -3
- package/dist/plugin/plugin-lazy-remote.js +2 -2
- package/dist/plugin/plugin-lazy-remote.js.map +1 -1
- package/dist/plugin/plugin-remote-stats.d.ts +2 -2
- package/dist/plugin/plugin-remote-stats.js +4 -2
- package/dist/plugin/plugin-remote-stats.js.map +1 -1
- package/dist/plugin/plugin-remote-worker.d.ts +0 -1
- package/dist/plugin/plugin-remote-worker.js +77 -335
- package/dist/plugin/plugin-remote-worker.js.map +1 -1
- package/dist/plugin/plugin-remote.d.ts +3 -3
- package/dist/plugin/plugin-remote.js +2 -2
- package/dist/plugin/plugin-remote.js.map +1 -1
- package/dist/plugin/plugin-repl.js +2 -1
- package/dist/plugin/plugin-repl.js.map +1 -1
- package/dist/plugin/runtime/cluster-fork.worker.d.ts +9 -0
- package/dist/plugin/runtime/cluster-fork.worker.js +73 -0
- package/dist/plugin/runtime/cluster-fork.worker.js.map +1 -0
- package/dist/plugin/runtime/custom-worker.js +2 -2
- package/dist/plugin/runtime/custom-worker.js.map +1 -1
- package/dist/plugin/runtime/node-fork-worker.js +5 -3
- package/dist/plugin/runtime/node-fork-worker.js.map +1 -1
- package/dist/plugin/runtime/python-worker.js +2 -2
- package/dist/plugin/runtime/python-worker.js.map +1 -1
- package/dist/rpc.d.ts +1 -0
- package/dist/rpc.js +3 -2
- package/dist/rpc.js.map +1 -1
- package/dist/runtime.d.ts +4 -0
- package/dist/runtime.js +16 -2
- package/dist/runtime.js.map +1 -1
- package/dist/scrypted-cluster-common.d.ts +22 -0
- package/dist/scrypted-cluster-common.js +332 -0
- package/dist/scrypted-cluster-common.js.map +1 -0
- package/dist/scrypted-cluster-main.d.ts +2 -0
- package/dist/scrypted-cluster-main.js +12 -0
- package/dist/scrypted-cluster-main.js.map +1 -0
- package/dist/scrypted-cluster.d.ts +38 -0
- package/dist/scrypted-cluster.js +277 -0
- package/dist/scrypted-cluster.js.map +1 -0
- package/dist/scrypted-main-exports.js +20 -14
- package/dist/scrypted-main-exports.js.map +1 -1
- package/dist/scrypted-server-main.js +8 -15
- package/dist/scrypted-server-main.js.map +1 -1
- package/dist/server-settings.d.ts +1 -0
- package/dist/server-settings.js +2 -1
- package/dist/server-settings.js.map +1 -1
- package/dist/services/backup.js.map +1 -1
- package/dist/services/cluster-fork.d.ts +7 -0
- package/dist/services/cluster-fork.js +25 -0
- package/dist/services/cluster-fork.js.map +1 -0
- package/dist/services/plugin.d.ts +2 -7
- package/dist/services/plugin.js +2 -17
- package/dist/services/plugin.js.map +1 -1
- package/package.json +7 -7
- package/python/plugin_remote.py +150 -135
- package/python/rpc_reader.py +3 -19
- package/src/ip.ts +5 -4
- package/src/listen-zero.ts +3 -2
- package/src/plugin/plugin-api.ts +11 -3
- package/src/plugin/plugin-console.ts +29 -7
- package/src/plugin/plugin-host-api.ts +1 -1
- package/src/plugin/plugin-host.ts +92 -77
- package/src/plugin/plugin-lazy-remote.ts +4 -4
- package/src/plugin/plugin-remote-stats.ts +6 -4
- package/src/plugin/plugin-remote-worker.ts +91 -376
- package/src/plugin/plugin-remote.ts +5 -5
- package/src/plugin/plugin-repl.ts +2 -1
- package/src/plugin/runtime/cluster-fork.worker.ts +92 -0
- package/src/plugin/runtime/custom-worker.ts +2 -2
- package/src/plugin/runtime/node-fork-worker.ts +6 -3
- package/src/plugin/runtime/python-worker.ts +2 -2
- package/src/rpc.ts +3 -2
- package/src/runtime.ts +17 -2
- package/src/scrypted-cluster-common.ts +374 -0
- package/src/scrypted-cluster-main.ts +12 -0
- package/src/scrypted-cluster.ts +326 -0
- package/src/scrypted-main-exports.ts +22 -16
- package/src/scrypted-server-main.ts +15 -23
- package/src/server-settings.ts +1 -0
- package/src/services/backup.ts +0 -1
- package/src/services/cluster-fork.ts +22 -0
- package/src/services/plugin.ts +3 -21
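
The source hunks reproduced below cover three of the changed files: plugin-remote-worker.ts, plugin-remote.ts, and plugin-repl.ts. The full comparison can be regenerated locally with npm's built-in diff subcommand (assuming npm 7 or later):

    npm diff --diff=@scrypted/server@0.123.0 --diff=@scrypted/server@0.123.2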
package/src/plugin/plugin-remote-worker.ts:

@@ -6,31 +6,30 @@ import net from 'net';
 import path from 'path';
 import { install as installSourceMapSupport } from 'source-map-support';
 import worker_threads from 'worker_threads';
-import { computeClusterObjectHash } from '../cluster/cluster-hash';
 import { ClusterObject, ConnectRPCObject } from '../cluster/connect-rpc-object';
-import {
+import { Deferred } from '../deferred';
 import { RpcMessage, RpcPeer } from '../rpc';
 import { evalLocal } from '../rpc-peer-eval';
 import { createDuplexRpcPeer } from '../rpc-serializer';
+import { getClusterPeerKey, isClusterAddress, peerConnectRPCObject, prepareClusterPeer } from '../scrypted-cluster-common';
 import { MediaManagerImpl } from './media';
-import { PluginAPI, PluginAPIProxy, PluginRemote, PluginRemoteLoadZipOptions } from './plugin-api';
+import { PluginAPI, PluginAPIProxy, PluginRemote, PluginRemoteLoadZipOptions, PluginZipAPI } from './plugin-api';
 import { pipeWorkerConsole, prepareConsoles } from './plugin-console';
 import { getPluginNodePath, installOptionalDependencies } from './plugin-npm-dependencies';
-import {
+import { attachPluginRemote, DeviceManagerImpl, setupPluginRemote } from './plugin-remote';
 import { PluginStats, startStatsUpdater } from './plugin-remote-stats';
 import { createREPLServer } from './plugin-repl';
 import { getPluginVolume } from './plugin-volume';
+import { ChildProcessWorker } from './runtime/child-process-worker';
+import { createClusterForkWorker, needsClusterForkWorker } from './runtime/cluster-fork.worker';
 import { NodeThreadWorker } from './runtime/node-thread-worker';
 import { prepareZip } from './runtime/node-worker-common';
-import { RuntimeWorker } from './runtime/runtime-worker';
 import { getBuiltinRuntimeHosts } from './runtime/runtime-host';
-import {
-import { Deferred } from '../deferred';
+import { RuntimeWorker } from './runtime/runtime-worker';
 
 const serverVersion = require('../../package.json').version;
 
 export interface StartPluginRemoteOptions {
-    onClusterPeer?(peer: RpcPeer): void;
     sourceURL?(filename: string): string;
     consoleId?: string;
 }
@@ -38,10 +37,15 @@ export interface StartPluginRemoteOptions {
 export function startPluginRemote(mainFilename: string, pluginId: string, peerSend: (message: RpcMessage, reject?: (e: Error) => void, serializationContext?: any) => void, startPluginRemoteOptions?: StartPluginRemoteOptions) {
     const peer = new RpcPeer('unknown', 'host', peerSend);
 
+    const clusterPeerSetup = prepareClusterPeer(peer);
+    const { initializeCluster, connectRPCObject, SCRYPTED_CLUSTER_ADDRESS, connectClusterObject, ensureClusterPeer, mainThreadBrokerRegister, mainThreadPort } = clusterPeerSetup;
+
+    peer.params.initializeCluster = initializeCluster;
+
     let systemManager: SystemManager;
     let deviceManager: DeviceManagerImpl;
     let api: PluginAPI;
-
+    let originalAPI: PluginAPI;
 
     let pluginsPromise: Promise<any>;
     function getPlugins() {
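
The destructuring above is the only view this file offers into prepareClusterPeer. Below is a sketch of the result shape it implies, inferred purely from the call sites in the hunks that follow; the authoritative definition lives in package/src/scrypted-cluster-common.ts, and the parameter and return types here are guesses:

    // Inferred sketch, not the verbatim source.
    interface ClusterPeerSetupSketch {
        SCRYPTED_CLUSTER_ADDRESS: string;                 // process.env.SCRYPTED_CLUSTER_ADDRESS passthrough
        initializeCluster(options: any): Promise<void>;   // called below with the loadZip options
        connectRPCObject<T>(value: T): Promise<T>;        // installed as scrypted.connectRPCObject
        connectClusterObject(o: ClusterObject): Promise<any>;
        ensureClusterPeer(address: string, connectPort: number): Promise<RpcPeer>;
        mainThreadBrokerRegister(workerPort: worker_threads.MessagePort, threadId: number): void;
        mainThreadPort: worker_threads.MessagePort;       // undefined when running on the main thread
    }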
@@ -84,6 +88,7 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
             }
         }
 
+        originalAPI = _api;
         api = new PluginForkableAPI(_api);
         peer.selfName = pluginId;
         return api;
@@ -95,331 +100,23 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
             if (name === 'repl') {
                 if (!replPort)
                     throw new Error('REPL unavailable: Plugin not loaded.')
-                return replPort;
+                return [await replPort, process.env.SCRYPTED_CLUSTER_ADDRESS];
             }
             throw new Error(`unknown service ${name}`);
         },
-        async onLoadZip(scrypted: ScryptedStatic, params: any, packageJson: any,
+        async onLoadZip(scrypted: ScryptedStatic, params: any, packageJson: any, zipAPI: PluginZipAPI, zipOptions: PluginRemoteLoadZipOptions) {
             const mainFile = zipOptions?.main || 'main';
             const mainNodejs = `${mainFile}.nodejs.js`;
             const pluginMainNodeJs = `/plugin/${mainNodejs}`;
             const pluginIdMainNodeJs = `/${pluginId}/${mainNodejs}`;
 
-            const {
-            const { zipFile, unzippedPath } = await prepareZip(getPluginVolume(pluginId), zipHash, getZip);
-
-            const SCRYPTED_CLUSTER_ADDRESS = process.env.SCRYPTED_CLUSTER_ADDRESS;
+            const { zipHash } = zipOptions;
+            const { zipFile, unzippedPath } = await prepareZip(getPluginVolume(pluginId), zipHash, zipAPI.getZip);
 
-
-                return !address || address === SCRYPTED_CLUSTER_ADDRESS;
-            }
-
-            const onProxySerialization = (peer: RpcPeer, value: any, sourceKey: string) => {
-                const properties = RpcPeer.prepareProxyProperties(value) || {};
-                let clusterEntry: ClusterObject = properties.__cluster;
+            await initializeCluster(zipOptions);
 
-
-                // worker threads will embed their pid and tid in the proxy id for cross worker fast path.
-                const proxyId = peer.localProxied.get(value)?.id || clusterEntry?.proxyId || `n-${process.pid}-${worker_threads.threadId}-${RpcPeer.generateId()}`;
+            scrypted.connectRPCObject = connectRPCObject;
 
-                // if the cluster entry already exists, check if it belongs to this node.
-                // if it belongs to this node, the entry must also be for this peer.
-                // relying on the liveness/gc of a different peer may cause race conditions.
-                if (clusterEntry) {
-                    if (isClusterAddress(clusterEntry?.address) && clusterPort === clusterEntry.port && sourceKey !== clusterEntry.sourceKey)
-                        clusterEntry = undefined;
-                }
-
-                if (!clusterEntry) {
-                    clusterEntry = {
-                        id: clusterId,
-                        address: SCRYPTED_CLUSTER_ADDRESS,
-                        port: clusterPort,
-                        proxyId,
-                        sourceKey,
-                        sha256: null,
-                    };
-                    clusterEntry.sha256 = computeClusterObjectHash(clusterEntry, clusterSecret);
-                    properties.__cluster = clusterEntry;
-                }
-
-                return {
-                    proxyId,
-                    properties,
-                };
-            }
-
-            peer.onProxySerialization = value => onProxySerialization(peer, value, undefined);
-
-            const resolveObject = async (id: string, sourceKey: string) => {
-                const sourcePeer = sourceKey
-                    ? await clusterPeers.get(sourceKey)
-                    : peer;
-                if (!sourcePeer)
-                    console.error('source peer not found', sourceKey);
-                const ret = sourcePeer?.localProxyMap.get(id);
-                if (!ret) {
-                    console.error('source key not found', sourceKey, id);
-                    return;
-                }
-                return ret;
-            }
-
-            // all cluster clients, incoming and outgoing, connect with random ports which can be used as peer ids
-            // on the cluster server that is listening on the actual port/
-            // incoming connections: use the remote random/unique port
-            // outgoing connections: use the local random/unique port
-            const clusterPeers = new Map<string, Promise<RpcPeer>>();
-            function getClusterPeerKey(address: string, port: number) {
-                return `${address}:${port}`;
-            }
-
-            const clusterRpcServer = net.createServer(client => {
-                const clusterPeerAddress = client.remoteAddress;
-                const clusterPeerPort = client.remotePort;
-                const clusterPeerKey = getClusterPeerKey(clusterPeerAddress, clusterPeerPort);
-                const clusterPeer = createDuplexRpcPeer(peer.selfName, clusterPeerKey, client, client);
-                // the listening peer sourceKey (client address/port) is used by the OTHER peer (the client)
-                // to determine if it is already connected to THIS peer (the server).
-                clusterPeer.onProxySerialization = (value) => onProxySerialization(clusterPeer, value, clusterPeerKey);
-                clusterPeers.set(clusterPeerKey, Promise.resolve(clusterPeer));
-                startPluginRemoteOptions?.onClusterPeer?.(clusterPeer);
-                const connectRPCObject: ConnectRPCObject = async (o) => {
-                    const sha256 = computeClusterObjectHash(o, clusterSecret);
-                    if (sha256 !== o.sha256)
-                        throw new Error('secret incorrect');
-                    return resolveObject(o.proxyId, o.sourceKey);
-                }
-                clusterPeer.params['connectRPCObject'] = connectRPCObject;
-                client.on('close', () => {
-                    clusterPeers.delete(clusterPeerKey);
-                    clusterPeer.kill('cluster socket closed');
-                });
-            })
-
-            const listenAddress = SCRYPTED_CLUSTER_ADDRESS
-                ? '0.0.0.0'
-                : '127.0.0.1';
-            const clusterPort = await listenZero(clusterRpcServer, listenAddress);
-
-            const ensureClusterPeer = (address: string, connectPort: number) => {
-                if (isClusterAddress(address))
-                    address = '127.0.0.1';
-
-                const clusterPeerKey = getClusterPeerKey(address, connectPort);
-                let clusterPeerPromise = clusterPeers.get(clusterPeerKey);
-                if (clusterPeerPromise)
-                    return clusterPeerPromise;
-
-                clusterPeerPromise = (async () => {
-                    const socket = net.connect(connectPort, address);
-                    socket.on('close', () => clusterPeers.delete(clusterPeerKey));
-
-                    try {
-                        await once(socket, 'connect');
-                        const { address: sourceAddress } = (socket.address() as net.AddressInfo);
-                        if (sourceAddress !== SCRYPTED_CLUSTER_ADDRESS && sourceAddress !== '127.0.0.1')
-                            console.warn("source address mismatch", sourceAddress);
-
-                        const clusterPeer = createDuplexRpcPeer(peer.selfName, clusterPeerKey, socket, socket);
-                        clusterPeer.onProxySerialization = (value) => onProxySerialization(clusterPeer, value, clusterPeerKey);
-                        return clusterPeer;
-                    }
-                    catch (e) {
-                        console.error('failure ipc connect', e);
-                        socket.destroy();
-                        throw e;
-                    }
-                })();
-
-                clusterPeers.set(clusterPeerKey, clusterPeerPromise);
-                return clusterPeerPromise;
-            };
-
-            async function peerConnectRPCObject(peer: RpcPeer, o: ClusterObject) {
-                let peerConnectRPCObject: Promise<ConnectRPCObject> = peer.tags['connectRPCObject'];
-                if (!peerConnectRPCObject) {
-                    peerConnectRPCObject = peer.getParam('connectRPCObject');
-                    peer.tags['connectRPCObject'] = peerConnectRPCObject;
-                }
-                const resolved = await peerConnectRPCObject;
-                return resolved(o);
-            }
-
-            const tidChannels = new Map<number, Deferred<worker_threads.MessagePort>>();
-            const tidPeers = new Map<number, Promise<RpcPeer>>();
-
-            function connectTidPeer(tid: number) {
-                let peerPromise = tidPeers.get(tid);
-                if (peerPromise)
-                    return peerPromise;
-                let tidDeferred = tidChannels.get(tid);
-                // if the tid port is not available yet, request it.
-                if (!tidDeferred) {
-                    tidDeferred = new Deferred<worker_threads.MessagePort>();
-                    tidChannels.set(tid, tidDeferred);
-
-                    if (mainThreadPort) {
-                        // request the connection via the main thread
-                        mainThreadPort.postMessage({
-                            threadId: tid,
-                        });
-                    }
-                }
-
-                const threadPeerKey = `thread:${tid}`;
-                function peerCleanup() {
-                    clusterPeers.delete(threadPeerKey);
-                }
-                peerPromise = tidDeferred.promise.then(port => {
-                    const threadPeer = NodeThreadWorker.createRpcPeer(peer.selfName, threadPeerKey, port);
-                    threadPeer.onProxySerialization = value => onProxySerialization(threadPeer, value, threadPeerKey);
-
-                    const connectRPCObject: ConnectRPCObject = async (o) => {
-                        const sha256 = computeClusterObjectHash(o, clusterSecret);
-                        if (sha256 !== o.sha256)
-                            throw new Error('secret incorrect');
-                        return resolveObject(o.proxyId, o.sourceKey);
-                    }
-                    threadPeer.params['connectRPCObject'] = connectRPCObject;
-
-                    function cleanup(message: string) {
-                        peerCleanup();
-                        tidChannels.delete(tid);
-                        tidPeers.delete(tid);
-                        threadPeer.kill(message);
-                    }
-                    port.on('close', () => cleanup('connection closed.'));
-                    port.on('messageerror', () => cleanup('message error.'));
-                    return threadPeer;
-                });
-                peerPromise.catch(() => peerCleanup());
-                clusterPeers.set(threadPeerKey, peerPromise);
-                tidPeers.set(tid, peerPromise);
-
-                return peerPromise;
-            }
-
-            const mainThreadPort: worker_threads.MessagePort = worker_threads.isMainThread ? undefined : worker_threads.workerData.mainThreadPort;
-            if (!worker_threads.isMainThread) {
-                // the main thread port will send messages with a thread port when a thread wants to initiate a connection.
-                mainThreadPort.on('message', async (message: { port: worker_threads.MessagePort, threadId: number }) => {
-                    const { port, threadId } = message;
-                    let tidDeferred = tidChannels.get(threadId);
-                    if (!tidDeferred) {
-                        tidDeferred = new Deferred<worker_threads.MessagePort>();
-                        tidChannels.set(threadId, tidDeferred);
-                    }
-                    tidDeferred.resolve(port);
-                    connectTidPeer(threadId);
-                });
-            }
-
-            async function connectIPCObject(clusterObject: ClusterObject, tid: number) {
-                // if the main thread is trying to connect to an object,
-                // the argument order matters here, as the connection attempt looks at the
-                // connectThreadId to see if the target is main thread.
-                if (worker_threads.isMainThread)
-                    mainThreadBrokerConnect(tid, worker_threads.threadId);
-                const clusterPeer = await connectTidPeer(tid);
-                const existing = clusterPeer.remoteWeakProxies[clusterObject.proxyId]?.deref();
-                if (existing)
-                    return existing;
-                return peerConnectRPCObject(clusterPeer, clusterObject);
-            }
-
-            const brokeredConnections = new Set<string>();
-            const workers = new Map<number, worker_threads.MessagePort>();
-            function mainThreadBrokerConnect(threadId: number, connectThreadId: number) {
-                if (worker_threads.isMainThread && threadId === worker_threads.threadId) {
-                    const msg = 'invalid ipc, main thread cannot connect to itself';
-                    console.error(msg);
-                    throw new Error(msg);
-                }
-                // both workers nay initiate connection to each other at same time, so this
-                // is a synchronization point.
-                const key = JSON.stringify([threadId, connectThreadId].sort());
-                if (brokeredConnections.has(key))
-                    return;
-
-                brokeredConnections.add(key);
-
-                const worker = workers.get(threadId);
-                const connect = workers.get(connectThreadId);
-                const channel = new worker_threads.MessageChannel();
-
-                worker.postMessage({
-                    port: channel.port1,
-                    threadId: connectThreadId,
-                }, [channel.port1]);
-
-                if (connect) {
-                    connect.postMessage({
-                        port: channel.port2,
-                        threadId,
-                    }, [channel.port2]);
-                }
-                else if (connectThreadId === worker_threads.threadId) {
-                    connectTidPeer(threadId);
-                    const deferred = tidChannels.get(threadId);
-                    deferred.resolve(channel.port2);
-                }
-                else {
-                    channel.port2.close();
-                }
-            }
-
-            function mainThreadBrokerRegister(workerPort: worker_threads.MessagePort, threadId: number) {
-                workers.set(threadId, workerPort);
-
-                // this is main thread, so there will be two types of requests from the child: registration requests from grandchildren and connection requests.
-                workerPort.on('message', async (message: { port: worker_threads.MessagePort, threadId: number }) => {
-                    const { port, threadId: connectThreadId } = message;
-
-                    if (port) {
-                        mainThreadBrokerRegister(port, connectThreadId);
-                    }
-                    else {
-                        mainThreadBrokerConnect(threadId, connectThreadId);
-                    }
-                });
-            }
-
-            scrypted.connectRPCObject = async (value: any) => {
-                const clusterObject: ClusterObject = value?.__cluster;
-                if (clusterObject?.id !== clusterId)
-                    return value;
-                const { address, port, proxyId, sourceKey } = clusterObject;
-                // handle the case when trying to connect to an object is on this cluster node,
-                // returning the actual object, rather than initiating a loopback connection.
-                if (port === clusterPort)
-                    return resolveObject(proxyId, sourceKey);
-
-                // can use worker to worker ipc if the address and pid matches and both side are node.
-                if (address === SCRYPTED_CLUSTER_ADDRESS && proxyId.startsWith('n-')) {
-                    const parts = proxyId.split('-');
-                    const pid = parseInt(parts[1]);
-                    if (pid === process.pid)
-                        return connectIPCObject(clusterObject, parseInt(parts[2]));
-                }
-
-                try {
-                    const clusterPeerPromise = ensureClusterPeer(address, port);
-                    const clusterPeer = await clusterPeerPromise;
-                    // may already have this proxy so check first.
-                    const existing = clusterPeer.remoteWeakProxies[proxyId]?.deref();
-                    if (existing)
-                        return existing;
-                    const newValue = await peerConnectRPCObject(clusterPeer, clusterObject);
-                    if (!newValue)
-                        throw new Error('rpc object not found?');
-                    return newValue;
-                }
-                catch (e) {
-                    console.error('failure rpc', clusterObject, e);
-                    return value;
-                }
-            }
             if (worker_threads.isMainThread) {
                 const fsDir = path.join(unzippedPath, 'fs')
                 await fs.promises.mkdir(fsDir, {
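
The roughly three hundred lines removed above (cluster-object stamping in onProxySerialization, the per-worker cluster RPC server, peer caching, and the worker-thread broker) were relocated rather than deleted: the file list shows package/src/scrypted-cluster-common.ts arriving with +374 lines, and the new imports at the top of this file pull getClusterPeerKey, isClusterAddress, peerConnectRPCObject, and prepareClusterPeer back in from there. Plugin-side usage is unchanged by the move; a minimal sketch, assuming the standard @scrypted/sdk entry point:

    import sdk from '@scrypted/sdk';

    // Upgrade a cluster proxy to a direct connection where possible. As in the
    // removed catch block above, the proxy itself remains usable if this fails.
    async function toDirect<T>(proxy: T): Promise<T> {
        return await sdk.connectRPCObject(proxy);
    }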
@@ -507,7 +204,7 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
             // process.memoryUsage is per thread.
             const allMemoryStats = new Map<RuntimeWorker, NodeJS.MemoryUsage>();
             // start the stats updater/watchdog after installation has finished, as that may take some time.
-
+            startStatsUpdater(allMemoryStats, zipAPI.updateStats);
 
             let pong: (time: number) => Promise<void>;
             peer.params.ping = async (time: number) => {
@@ -525,68 +222,83 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
             const pluginRemoteAPI: PluginRemote = scrypted.pluginRemoteAPI;
 
             scrypted.fork = (options) => {
+                let forkPeer: Promise<RpcPeer>;
                 let runtimeWorker: RuntimeWorker;
                 let nativeWorker: child_process.ChildProcess | worker_threads.Worker;
-
-
-
-
-
-
-                    packageJson,
-                    env: process.env,
-                    pluginDebug: undefined,
-                    zipFile,
-                    unzippedPath,
-                    zipHash,
-                }, undefined);
-
-                if (runtimeWorker instanceof ChildProcessWorker) {
-                    nativeWorker = runtimeWorker.childProcess;
-                    const console = options?.id ? getMixinConsole(options.id, options.nativeId) : undefined;
-                    pipeWorkerConsole(nativeWorker, console);
-                }
+
+                // if running in a cluster, fork to a matching cluster worker only if necessary.
+                if (needsClusterForkWorker(options)) {
+                    ({ runtimeWorker, forkPeer } = createClusterForkWorker(
+                        api.getComponent('cluster-fork'), zipHash, zipAPI.getZip, options, packageJson, scrypted.connectRPCObject)
+                    );
                 }
                 else {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    nativeWorker = ntw.worker;
-
-                    const { threadId } = ntw.worker;
-                    if (mainThreadPort) {
-                        // grandparent connection to child
-                        mainThreadPort.postMessage({
-                            port: mainThreadChannel.port2,
-                            threadId,
-                        }, [mainThreadChannel.port2]);
+                    if (options?.runtime) {
+                        const builtins = getBuiltinRuntimeHosts();
+                        const runtime = builtins.get(options.runtime);
+                        if (!runtime)
+                            throw new Error('unknown runtime ' + options.runtime);
+                        runtimeWorker = runtime(mainFilename, pluginId, {
+                            packageJson,
+                            env: undefined,
+                            pluginDebug: undefined,
+                            zipFile,
+                            unzippedPath,
+                            zipHash,
+                        }, undefined);
+
+                        if (runtimeWorker instanceof ChildProcessWorker) {
+                            nativeWorker = runtimeWorker.childProcess;
+                        }
                     }
                     else {
-
+                        // when a node thread is created, also create a secondary message channel to link the grandparent (or mainthread) and child.
+                        const mainThreadChannel = new worker_threads.MessageChannel();
+
+                        const ntw = new NodeThreadWorker(mainFilename, pluginId, {
+                            packageJson,
+                            env: undefined,
+                            pluginDebug: undefined,
+                            zipFile,
+                            unzippedPath,
+                            zipHash,
+                        }, {
+                            name: options?.name,
+                        }, {
+                            // child connection to grandparent
+                            mainThreadPort: mainThreadChannel.port1,
+                        }, [mainThreadChannel.port1]);
+                        runtimeWorker = ntw;
+                        nativeWorker = ntw.worker;
+
+                        const { threadId } = ntw.worker;
+                        if (mainThreadPort) {
+                            // grandparent connection to child
+                            mainThreadPort.postMessage({
+                                port: mainThreadChannel.port2,
+                                threadId,
+                            }, [mainThreadChannel.port2]);
+                        }
+                        else {
+                            mainThreadBrokerRegister(mainThreadChannel.port2, threadId);
+                        }
+                    }
+
+                    // thread workers inherit main console. pipe anything else.
+                    if (!(runtimeWorker instanceof NodeThreadWorker)) {
+                        const console = options?.id ? getMixinConsole(options.id, options.nativeId) : undefined;
+                        pipeWorkerConsole(nativeWorker, console);
                     }
+
+                    const localPeer = new RpcPeer('main', 'thread', (message, reject, serializationContext) => runtimeWorker.send(message, reject, serializationContext));
+                    runtimeWorker.setupRpcPeer(localPeer);
+                    forkPeer = Promise.resolve(localPeer);
                 }
 
                 const result = (async () => {
-                    const threadPeer =
-                    threadPeer.params.updateStats = (stats: PluginStats) => {
-                        allMemoryStats.set(runtimeWorker, stats.memoryUsage);
-                    }
-                    runtimeWorker.setupRpcPeer(threadPeer);
+                    const threadPeer = await forkPeer;
 
+                    // todo: handle nested forks and skip wrap. this is probably buggy.
                     class PluginForkAPI extends PluginAPIProxy {
                         [RpcPeer.PROPERTY_PROXY_ONEWAY_METHODS] = (api as any)[RpcPeer.PROPERTY_PROXY_ONEWAY_METHODS];
 
@@ -625,7 +337,10 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
                     const forkOptions = Object.assign({}, zipOptions);
                     forkOptions.fork = true;
                     forkOptions.main = options?.filename;
-
+                    const forkZipAPI = new PluginZipAPI(zipAPI.getZip, async (stats: PluginStats) => {
+                        allMemoryStats.set(runtimeWorker, stats.memoryUsage);
+                    });
+                    return remote.loadZip(packageJson, forkZipAPI, forkOptions)
                 })();
 
                 result.catch(() => runtimeWorker.kill());
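
PluginZipAPI replaces the loose getZip/updateStats arguments previously threaded through loadZip and onLoadZip. Its shape, as implied by the constructor call above and the zipAPI.getZip / zipAPI.updateStats call sites elsewhere in this diff; the real class is exported from package/src/plugin/plugin-api.ts and may carry additional members:

    // Inferred from call sites in this diff, not the verbatim source.
    class PluginZipAPI {
        constructor(
            public readonly getZip: () => Promise<Buffer>,
            public readonly updateStats?: (stats: PluginStats) => Promise<void>,
        ) {
        }
    }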
package/src/plugin/plugin-remote.ts:

@@ -2,7 +2,7 @@ import { Device, DeviceManager, DeviceManifest, DeviceState, EndpointAccessContr
 import { RpcPeer, RPCResultError } from '../rpc';
 import { AccessControls } from './acl';
 import { BufferSerializer } from '../rpc-buffer-serializer';
-import { PluginAPI, PluginHostInfo, PluginLogger, PluginRemote, PluginRemoteLoadZipOptions } from './plugin-api';
+import { PluginAPI, PluginHostInfo, PluginLogger, PluginRemote, PluginRemoteLoadZipOptions, PluginZipAPI } from './plugin-api';
 import { createWebSocketClass, WebSocketConnectCallbacks, WebSocketConnection, WebSocketMethods, WebSocketSerializer } from './plugin-remote-websocket';
 import { checkProperty } from './plugin-state-check';
 import { SystemManagerImpl } from './system';
@@ -456,11 +456,11 @@ export interface WebSocketCustomHandler {
 
 export interface PluginRemoteAttachOptions {
     createMediaManager?: (systemManager: SystemManager, deviceManager: DeviceManagerImpl) => Promise<MediaManager>;
-    getServicePort?: (name: string, ...args: any[]) => Promise<number>;
+    getServicePort?: (name: string, ...args: any[]) => Promise<[number, string]>;
     getDeviceConsole?: (nativeId?: ScryptedNativeId) => Console;
     getPluginConsole?: () => Console;
     getMixinConsole?: (id: string, nativeId?: ScryptedNativeId) => Console;
-    onLoadZip?: (scrypted: ScryptedStatic, params: any, packageJson: any,
+    onLoadZip?: (scrypted: ScryptedStatic, params: any, packageJson: any, zipAPI: PluginZipAPI, zipOptions: PluginRemoteLoadZipOptions) => Promise<any>;
     onGetRemote?: (api: PluginAPI, pluginId: string) => Promise<PluginAPI>;
 }
 
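
getServicePort now resolves a [port, address] tuple rather than a bare port, matching the repl service change above, which returns the cluster address alongside the port. A hypothetical caller (the variable names here are illustrative, not from the package):

    // address is undefined when SCRYPTED_CLUSTER_ADDRESS is not set,
    // so fall back to loopback.
    const [port, address] = await options.getServicePort('repl');
    const replTarget = `${address || '127.0.0.1'}:${port}`;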
@@ -634,7 +634,7 @@ export function attachPluginRemote(peer: RpcPeer, options?: PluginRemoteAttachOp
             done(ret);
         },
 
-        async loadZip(packageJson: any,
+        async loadZip(packageJson: any, zipAPI: PluginZipAPI, zipOptions?: PluginRemoteLoadZipOptions) {
             const params: any = {
                 __filename: undefined,
                 deviceManager,
@@ -657,7 +657,7 @@ export function attachPluginRemote(peer: RpcPeer, options?: PluginRemoteAttachOp
             params.pluginRuntimeAPI = ret;
 
             try {
-                return await options.onLoadZip(ret, params, packageJson,
+                return await options.onLoadZip(ret, params, packageJson, zipAPI, zipOptions);
             }
             catch (e) {
                 console.error('plugin start/fork failed', e)
package/src/plugin/plugin-repl.ts:

@@ -75,5 +75,6 @@ export async function createREPLServer(scrypted: ScryptedStatic, params: any, pl
         socket.on('error', cleanup);
         socket.on('end', cleanup);
     });
-
+    const address = process.env.SCRYPTED_CLUSTER_ADDRESS ? '0.0.0.0' : '127.0.0.1';
+    return listenZero(server, address);
 }
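
With this change the REPL server binds to all interfaces when a cluster address is configured, so other cluster nodes can reach it; otherwise it stays loopback-only. For reference, a minimal sketch of what listenZero(server, address) plausibly does; the actual implementation is in package/src/listen-zero.ts, which this release also touches:

    import net from 'net';
    import { once } from 'events';

    // Bind an ephemeral port on the given address and resolve the
    // kernel-assigned port number.
    async function listenZeroSketch(server: net.Server, address: string): Promise<number> {
        server.listen(0, address);
        await once(server, 'listening');
        return (server.address() as net.AddressInfo).port;
    }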