@scrypted/server 0.123.33 → 0.123.35

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/dist/cluster/cluster-labels.d.ts +5 -0
  2. package/dist/cluster/cluster-labels.js +15 -5
  3. package/dist/cluster/cluster-labels.js.map +1 -1
  4. package/dist/cluster/cluster-setup.js +12 -5
  5. package/dist/cluster/cluster-setup.js.map +1 -1
  6. package/dist/plugin/plugin-host.d.ts +1 -0
  7. package/dist/plugin/plugin-host.js +8 -2
  8. package/dist/plugin/plugin-host.js.map +1 -1
  9. package/dist/plugin/plugin-remote-worker.js +2 -2
  10. package/dist/plugin/plugin-remote-worker.js.map +1 -1
  11. package/dist/plugin/runtime/cluster-fork-worker.js +1 -1
  12. package/dist/plugin/runtime/cluster-fork-worker.js.map +1 -1
  13. package/dist/scrypted-cluster-main.d.ts +13 -3
  14. package/dist/scrypted-cluster-main.js +97 -77
  15. package/dist/scrypted-cluster-main.js.map +1 -1
  16. package/dist/scrypted-server-main.js +19 -8
  17. package/dist/scrypted-server-main.js.map +1 -1
  18. package/dist/services/cluster-fork.d.ts +3 -3
  19. package/dist/services/cluster-fork.js +54 -14
  20. package/dist/services/cluster-fork.js.map +1 -1
  21. package/package.json +1 -1
  22. package/python/cluster_labels.py +4 -1
  23. package/python/cluster_setup.py +16 -7
  24. package/python/plugin_console.py +1 -0
  25. package/python/plugin_pip.py +14 -8
  26. package/python/plugin_remote.py +120 -38
  27. package/python/plugin_repl.py +42 -15
  28. package/python/plugin_volume.py +17 -11
  29. package/python/rpc-iterator-test.py +11 -8
  30. package/python/rpc.py +242 -154
  31. package/python/rpc_reader.py +35 -28
  32. package/src/cluster/cluster-labels.ts +16 -5
  33. package/src/cluster/cluster-setup.ts +12 -5
  34. package/src/plugin/plugin-host.ts +11 -3
  35. package/src/plugin/plugin-remote-worker.ts +4 -5
  36. package/src/plugin/runtime/cluster-fork-worker.ts +1 -1
  37. package/src/scrypted-cluster-main.ts +123 -91
  38. package/src/scrypted-server-main.ts +24 -11
  39. package/src/services/cluster-fork.ts +64 -18
@@ -16,7 +16,7 @@ import json
 
 class BufferSerializer(rpc.RpcSerializer):
     def serialize(self, value, serializationContext):
-        return base64.b64encode(value).decode('utf8')
+        return base64.b64encode(value).decode("utf8")
 
     def deserialize(self, value, serializationContext):
         return base64.b64decode(value)
@@ -24,15 +24,15 @@ class BufferSerializer(rpc.RpcSerializer):
 
 class SidebandBufferSerializer(rpc.RpcSerializer):
     def serialize(self, value, serializationContext):
-        buffers = serializationContext.get('buffers', None)
+        buffers = serializationContext.get("buffers", None)
         if not buffers:
             buffers = []
-            serializationContext['buffers'] = buffers
+            serializationContext["buffers"] = buffers
         buffers.append(value)
         return len(buffers) - 1
 
     def deserialize(self, value, serializationContext):
-        buffers: List = serializationContext.get('buffers', None)
+        buffers: List = serializationContext.get("buffers", None)
         buffer = buffers.pop()
         return buffer
 
@@ -56,7 +56,7 @@ class RpcFileTransport(RpcTransport):
         super().__init__()
         self.readFd = readFd
         self.writeFd = writeFd
-        self.executor = ThreadPoolExecutor(1, 'rpc-read')
+        self.executor = ThreadPoolExecutor(1, "rpc-read")
 
     def osReadExact(self, size: int):
         b = bytes(0)
@@ -64,7 +64,7 @@ class RpcFileTransport(RpcTransport):
             got = os.read(self.readFd, size)
             if not len(got):
                 self.executor.shutdown(False)
-                raise Exception('rpc end of stream reached')
+                raise Exception("rpc end of stream reached")
             size -= len(got)
             b += got
         return b
@@ -73,7 +73,7 @@ class RpcFileTransport(RpcTransport):
         lengthBytes = self.osReadExact(4)
         typeBytes = self.osReadExact(1)
         type = typeBytes[0]
-        length = int.from_bytes(lengthBytes, 'big')
+        length = int.from_bytes(lengthBytes, "big")
         data = self.osReadExact(length - 1)
         if type == 1:
             return data
@@ -81,11 +81,13 @@ class RpcFileTransport(RpcTransport):
         return message
 
     async def read(self):
-        return await asyncio.get_event_loop().run_in_executor(self.executor, lambda: self.readMessageInternal())
+        return await asyncio.get_event_loop().run_in_executor(
+            self.executor, lambda: self.readMessageInternal()
+        )
 
     def writeMessage(self, type: int, buffer, reject):
         length = len(buffer) + 1
-        lb = length.to_bytes(4, 'big')
+        lb = length.to_bytes(4, "big")
         try:
             for b in [lb, bytes([type]), buffer]:
                 os.write(self.writeFd, b)
@@ -94,14 +96,18 @@ class RpcFileTransport(RpcTransport):
             reject(e)
 
     def writeJSON(self, j, reject):
-        return self.writeMessage(0, bytes(json.dumps(j, allow_nan=False), 'utf8'), reject)
+        return self.writeMessage(
+            0, bytes(json.dumps(j, allow_nan=False), "utf8"), reject
+        )
 
     def writeBuffer(self, buffer, reject):
         return self.writeMessage(1, buffer, reject)
 
 
 class RpcStreamTransport(RpcTransport):
-    def __init__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
+    def __init__(
+        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
+    ) -> None:
         super().__init__()
         self.reader = reader
         self.writer = writer
@@ -110,7 +116,7 @@ class RpcStreamTransport(RpcTransport):
         lengthBytes = await self.reader.readexactly(4)
         typeBytes = await self.reader.readexactly(1)
         type = typeBytes[0]
-        length = int.from_bytes(lengthBytes, 'big')
+        length = int.from_bytes(lengthBytes, "big")
         data = await self.reader.readexactly(length - 1)
         if type == 1:
             return data
@@ -119,7 +125,7 @@ class RpcStreamTransport(RpcTransport):
 
     def writeMessage(self, type: int, buffer, reject):
         length = len(buffer) + 1
-        lb = length.to_bytes(4, 'big')
+        lb = length.to_bytes(4, "big")
         try:
             for b in [lb, bytes([type]), buffer]:
                 self.writer.write(b)
@@ -128,7 +134,9 @@ class RpcStreamTransport(RpcTransport):
             reject(e)
 
     def writeJSON(self, j, reject):
-        return self.writeMessage(0, bytes(json.dumps(j, allow_nan=False), 'utf8'), reject)
+        return self.writeMessage(
+            0, bytes(json.dumps(j, allow_nan=False), "utf8"), reject
+        )
 
     def writeBuffer(self, buffer, reject):
         return self.writeMessage(1, buffer, reject)
@@ -141,7 +149,9 @@ class RpcConnectionTransport(RpcTransport):
         self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
 
     async def read(self):
-        return await asyncio.get_event_loop().run_in_executor(self.executor, lambda: self.connection.recv())
+        return await asyncio.get_event_loop().run_in_executor(
+            self.executor, lambda: self.connection.recv()
+        )
 
     def writeMessage(self, json, reject):
         try:
@@ -158,23 +168,20 @@ class RpcConnectionTransport(RpcTransport):
 
 
 async def readLoop(loop, peer: rpc.RpcPeer, rpcTransport: RpcTransport):
-    deserializationContext = {
-        'buffers': []
-    }
+    deserializationContext = {"buffers": []}
 
     while True:
        message = await rpcTransport.read()
 
         if type(message) != dict:
-            deserializationContext['buffers'].append(message)
+            deserializationContext["buffers"].append(message)
             continue
 
         asyncio.run_coroutine_threadsafe(
-            peer.handleMessage(message, deserializationContext), loop)
+            peer.handleMessage(message, deserializationContext), loop
+        )
 
-        deserializationContext = {
-            'buffers': []
-        }
+        deserializationContext = {"buffers": []}
 
 
 async def prepare_peer_readloop(loop: AbstractEventLoop, rpcTransport: RpcTransport):
@@ -185,7 +192,7 @@ async def prepare_peer_readloop(loop: AbstractEventLoop, rpcTransport: RpcTransp
     def send(message, reject=None, serializationContext=None):
         with mutex:
             if serializationContext:
-                buffers = serializationContext.get('buffers', None)
+                buffers = serializationContext.get("buffers", None)
                 if buffers:
                     for buffer in buffers:
                         rpcTransport.writeBuffer(buffer, reject)
@@ -193,10 +200,10 @@ async def prepare_peer_readloop(loop: AbstractEventLoop, rpcTransport: RpcTransp
             rpcTransport.writeJSON(message, reject)
 
     peer = rpc.RpcPeer(send)
-    peer.nameDeserializerMap['Buffer'] = SidebandBufferSerializer()
-    peer.constructorSerializerMap[bytes] = 'Buffer'
-    peer.constructorSerializerMap[bytearray] = 'Buffer'
-    peer.constructorSerializerMap[memoryview] = 'Buffer'
+    peer.nameDeserializerMap["Buffer"] = SidebandBufferSerializer()
+    peer.constructorSerializerMap[bytes] = "Buffer"
+    peer.constructorSerializerMap[bytearray] = "Buffer"
+    peer.constructorSerializerMap[memoryview] = "Buffer"
 
     async def peerReadLoop():
         try:
@@ -9,13 +9,19 @@ export function matchesClusterLabels(options: ClusterForkOptions, labels: string
     }
 
     // if there is nothing in the any list, consider it matched
-    let foundAny = !options?.labels?.any?.length;
-    for (const label of options.labels?.any || []) {
-        if (labels.includes(label)) {
-            matched++;
-            foundAny = true;
+    let foundAny: boolean;
+    if (options?.labels?.any?.length) {
+        for (const label of options.labels.any) {
+            if (labels.includes(label)) {
+                foundAny = true;
+                break;
+            }
         }
     }
+    else {
+        foundAny = true;
+    }
+
     if (!foundAny)
         return 0;
 
@@ -40,3 +46,8 @@ export function needsClusterForkWorker(options: ClusterForkOptions) {
         && options
         && (!matchesClusterLabels(options, getClusterLabels()) || options.clusterWorkerId);
 }
+
+export function utilizesClusterForkWorker(options: ClusterForkOptions) {
+    return process.env.SCRYPTED_CLUSTER_ADDRESS
+        && (options?.labels || options?.clusterWorkerId);
+}
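
Note on the helper split above: needsClusterForkWorker still consults matchesClusterLabels, so it is only truthy when the current node cannot satisfy the requested labels (or a specific clusterWorkerId is requested), while the new utilizesClusterForkWorker is truthy whenever a cluster address is configured and the fork carries labels or a clusterWorkerId at all, even if the local node could run it. A minimal sketch of the new helper's behavior; the import path, address, and label values are assumptions for illustration only:

    import { utilizesClusterForkWorker } from './src/cluster/cluster-labels';

    process.env.SCRYPTED_CLUSTER_ADDRESS = '192.168.1.10'; // assumed cluster address

    // Truthy: labels are present, so the fork is routed through the cluster fork
    // machinery even when the local node could satisfy those labels itself.
    utilizesClusterForkWorker({ labels: { any: ['compute'] } });

    // Truthy: a specific worker was requested.
    utilizesClusterForkWorker({ clusterWorkerId: 'some-worker-uuid' });

    // Falsy: plain forks with no labels and no worker id stay on the local runtime.
    utilizesClusterForkWorker({});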
@@ -249,8 +249,15 @@ export function setupCluster(peer: RpcPeer) {
         if (address === SCRYPTED_CLUSTER_ADDRESS && proxyId.startsWith('n-')) {
             const parts = proxyId.split('-');
             const pid = parseInt(parts[1]);
-            if (pid === process.pid)
-                return connectIPCObject(clusterObject, parseInt(parts[2]));
+            const tid = parseInt(parts[2]);
+            if (pid === process.pid) {
+                if (worker_threads.isMainThread && tid === worker_threads.threadId) {
+                    // main thread can't call itself, so this may be a different thread cluster.
+                }
+                else {
+                    return connectIPCObject(clusterObject, parseInt(parts[2]));
+                }
+            }
         }
 
         try {
@@ -379,15 +386,15 @@ export function getScryptedClusterMode(): ['server' | 'client', string, number]
 
     if (!mode) {
         if (process.env.SCRYPTED_CLUSTER_ADDRESS) {
-            console.warn('SCRYPTED_CLUSTER_ADDRESS, but SCRYPTED_CLUSTER_MODE is not set. This setting will be ignored.');
+            console.warn('SCRYPTED_CLUSTER_ADDRESS is set but SCRYPTED_CLUSTER_MODE is not set. This setting will be ignored.');
             delete process.env.SCRYPTED_CLUSTER_ADDRESS;
         }
         if (process.env.SCRPYTED_CLUSTER_SERVER) {
-            console.warn('SCRYPTED_CLUSTER_SERVER, but SCRYPTED_CLUSTER_MODE is not set. This setting will be ignored.');
+            console.warn('SCRYPTED_CLUSTER_SERVER is set but SCRYPTED_CLUSTER_MODE is not set. This setting will be ignored.');
             delete process.env.SCRPYTED_CLUSTER_SERVER
         }
         if (process.env.SCRYPTED_CLUSTER_SECRET) {
-            console.warn('SCRYPTED_CLUSTER_SECRET, but SCRYPTED_CLUSTER_MODE is not set. This setting will be ignored.');
+            console.warn('SCRYPTED_CLUSTER_SECRET is set but SCRYPTED_CLUSTER_MODE is not set. This setting will be ignored.');
             delete process.env.SCRYPTED_CLUSTER_SECRET;
         }
         return;
@@ -4,7 +4,7 @@ import * as io from 'engine.io';
 import fs from 'fs';
 import os from 'os';
 import WebSocket from 'ws';
-import { needsClusterForkWorker } from '../cluster/cluster-labels';
+import { utilizesClusterForkWorker } from '../cluster/cluster-labels';
 import { setupCluster } from '../cluster/cluster-setup';
 import { Plugin } from '../db-types';
 import { IOServer, IOServerSocket } from '../io';
@@ -65,6 +65,7 @@ export class PluginHost {
     zipHash: string;
     zipFile: string;
     unzippedPath: string;
+    clusterWorkerId: Promise<string>;
 
     kill() {
         this.killed = true;
@@ -350,7 +351,9 @@ export class PluginHost {
             zipFile: this.zipFile,
             zipHash: this.zipHash,
         };
-        if (!needsClusterForkWorker(this.packageJson.scrypted)) {
+
+        // if a plugin requests a cluster worker, and it can be fulfilled by the server, do it.
+        if (!utilizesClusterForkWorker(this.packageJson.scrypted)) {
             this.peer = new RpcPeer('host', this.pluginId, (message, reject, serializationContext) => {
                 if (connected) {
                     this.worker.send(message, reject, serializationContext);
@@ -368,6 +371,7 @@ export class PluginHost {
 
             this.worker.stdout.on('data', data => console.log(data.toString()));
             this.worker.stderr.on('data', data => console.error(data.toString()));
+            this.clusterWorkerId = Promise.resolve(undefined);
         }
         else {
             this.peer = new RpcPeer('host', this.pluginId, (message, reject, serializationContext) => {
@@ -399,9 +403,13 @@ export class PluginHost {
                 this.peer = peer;
                 peer.killedSafe.finally(() => originalPeer.kill());
             }).catch(() => { });
+
+            this.clusterWorkerId = clusterWorkerId;
             clusterWorkerId.then(clusterWorkerId => {
                 console.log('cluster worker id', clusterWorkerId);
-            }).catch(() => { });
+            }).catch(() => {
+                console.warn("cluster worker id failed", clusterWorkerId);
+            });
 
             this.worker = runtimeWorker;
             peer = forkPeer;
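
Note: the new PluginHost.clusterWorkerId field resolves to undefined when the plugin runs on the local runtime (the Promise.resolve(undefined) branch above) and to the remote worker's id when the fork was placed through the cluster. A hypothetical consumer, sketch only, with the import path assumed:

    import type { PluginHost } from './src/plugin/plugin-host';

    // Sketch only: not part of the diff.
    async function describePluginPlacement(host: PluginHost) {
        // The promise may reject when the cluster fork fails, mirroring the warn handler above.
        const workerId = await host.clusterWorkerId.catch(() => undefined);
        return workerId ? `running on cluster worker ${workerId}` : 'running locally';
    }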
@@ -4,10 +4,12 @@ import fs from 'fs';
 import path from 'path';
 import { install as installSourceMapSupport } from 'source-map-support';
 import worker_threads from 'worker_threads';
-import { needsClusterForkWorker } from '../cluster/cluster-labels';
+import { utilizesClusterForkWorker } from '../cluster/cluster-labels';
 import { setupCluster } from '../cluster/cluster-setup';
 import { RpcMessage, RpcPeer } from '../rpc';
 import { evalLocal } from '../rpc-peer-eval';
+import { ClusterManagerImpl } from '../scrypted-cluster-main';
+import type { PluginComponent } from '../services/plugin';
 import type { DeviceManagerImpl } from './device';
 import { MediaManagerImpl } from './media';
 import { PluginAPI, PluginAPIProxy, PluginRemote, PluginRemoteLoadZipOptions, PluginZipAPI } from './plugin-api';
@@ -22,9 +24,6 @@ import { NodeThreadWorker } from './runtime/node-thread-worker';
 import { prepareZip } from './runtime/node-worker-common';
 import { getBuiltinRuntimeHosts } from './runtime/runtime-host';
 import { RuntimeWorker, RuntimeWorkerOptions } from './runtime/runtime-worker';
-import type { ClusterForkService } from '../services/cluster-fork';
-import type { PluginComponent } from '../services/plugin';
-import { ClusterManagerImpl } from '../scrypted-cluster-main';
 
 const serverVersion = require('../../package.json').version;
 
@@ -226,7 +225,7 @@ export function startPluginRemote(mainFilename: string, pluginId: string, peerSe
         };
 
         // if running in a cluster, fork to a matching cluster worker only if necessary.
-        if (needsClusterForkWorker(options)) {
+        if (utilizesClusterForkWorker(options)) {
             ({ runtimeWorker, forkPeer, clusterWorkerId } = createClusterForkWorker(
                 runtimeWorkerOptions,
                 options,
@@ -85,7 +85,7 @@ export function createClusterForkWorker(
             return peer;
         }
         catch (e) {
-            clusterForkResult.kill();
+            clusterForkResult.kill().catch(() => {});
             throw e;
         }
     })();
@@ -74,8 +74,11 @@ export interface ClusterWorkerProperties {
 
 export interface RunningClusterWorker extends ClusterWorkerProperties {
     id: string;
+    name: string;
     peer: RpcPeer;
+    fork: Promise<ClusterForkParam>;
     forks: Set<ClusterForkOptions>;
+    address: string;
 }
 
 export class PeerLiveness {
@@ -86,7 +89,7 @@ export class PeerLiveness {
     }
 }
 
-export class ClusterForkResult extends PeerLiveness {
+export class ClusterForkResult extends PeerLiveness implements ClusterForkResultInterface {
     [RpcPeer.PROPERTY_PROXY_ONEWAY_METHODS] = ['kill'];
     clusterWorkerId?: string;
 
@@ -103,7 +106,96 @@ export class ClusterForkResult extends PeerLiveness {
     }
 }
 
-export type ClusterForkParam = (runtime: string, options: RuntimeWorkerOptions, peerLiveness: PeerLiveness, getZip: () => Promise<Buffer>) => Promise<ClusterForkResult>;
+export interface ClusterForkResultInterface {
+    clusterWorkerId?: string;
+    getResult(): Promise<any>;
+    kill(): Promise<void>;
+    waitKilled(): Promise<void>;
+}
+
+export type ClusterForkParam = (runtime: string, options: RuntimeWorkerOptions, peerLiveness: PeerLiveness, getZip: () => Promise<Buffer>) => Promise<ClusterForkResultInterface>;
+
+function createClusterForkParam(mainFilename: string, clusterId: string, clusterSecret: string) {
+    const clusterForkParam: ClusterForkParam = async (runtime, runtimeWorkerOptions, peerLiveness, getZip) => {
+        let runtimeWorker: RuntimeWorker;
+
+        const builtins = getBuiltinRuntimeHosts();
+        const rt = builtins.get(runtime);
+        if (!rt)
+            throw new Error('unknown runtime ' + runtime);
+
+        const pluginId: string = runtimeWorkerOptions.packageJson.name;
+        const { zipFile, unzippedPath } = await prepareZip(getPluginVolume(pluginId), runtimeWorkerOptions.zipHash, getZip);
+
+        const volume = getScryptedVolume();
+        const pluginVolume = getPluginVolume(pluginId);
+
+        runtimeWorkerOptions.zipFile = zipFile;
+        runtimeWorkerOptions.unzippedPath = unzippedPath;
+
+        runtimeWorkerOptions.env = {
+            ...runtimeWorkerOptions.env,
+            SCRYPTED_VOLUME: volume,
+            SCRYPTED_PLUGIN_VOLUME: pluginVolume,
+        };
+
+        runtimeWorker = rt(mainFilename, runtimeWorkerOptions, undefined);
+        runtimeWorker.stdout.on('data', data => console.log(data.toString()));
+        runtimeWorker.stderr.on('data', data => console.error(data.toString()));
+
+        const threadPeer = new RpcPeer('main', 'thread', (message, reject, serializationContext) => runtimeWorker.send(message, reject, serializationContext));
+        runtimeWorker.setupRpcPeer(threadPeer);
+        runtimeWorker.on('exit', () => {
+            threadPeer.kill('worker exited');
+        });
+        runtimeWorker.on('error', e => {
+            threadPeer.kill('worker error ' + e);
+        });
+        threadPeer.killedSafe.finally(() => {
+            runtimeWorker.kill();
+        });
+        peerLiveness.waitKilled().catch(() => { }).finally(() => {
+            threadPeer.kill('peer killed');
+        });
+        let getRemote: any;
+        let ping: any;
+        try {
+            const initializeCluster: InitializeCluster = await threadPeer.getParam('initializeCluster');
+            await initializeCluster({ clusterId, clusterSecret });
+            getRemote = await threadPeer.getParam('getRemote');
+            ping = await threadPeer.getParam('ping');
+        }
+        catch (e) {
+            threadPeer.kill('cluster fork failed');
+            throw e;
+        }
+
+        const readStream = async function* (stream: Readable) {
+            for await (const buffer of stream) {
+                yield buffer;
+            }
+        }
+
+        const timeout = setTimeout(() => {
+            threadPeer.kill('cluster fork timeout');
+        }, 10000);
+        const clusterGetRemote = (...args: any[]) => {
+            clearTimeout(timeout);
+            return {
+                [RpcPeer.PROPERTY_JSON_COPY_SERIALIZE_CHILDREN]: true,
+                stdout: readStream(runtimeWorker.stdout),
+                stderr: readStream(runtimeWorker.stderr),
+                getRemote,
+                ping,
+            };
+        };
+
+        const result = new ClusterForkResult(threadPeer, threadPeer.killed, clusterGetRemote);
+        return result;
+    };
+
+    return clusterForkParam;
+}
 
 export function startClusterClient(mainFilename: string) {
     console.log('Cluster client starting.');
@@ -132,6 +224,7 @@ export function startClusterClient(mainFilename: string) {
                 await once(rawSocket, 'connect');
             }
             catch (e) {
+                console.warn('Cluster server not available.', host, port, e);
                 continue;
             }
 
@@ -144,6 +237,7 @@ export function startClusterClient(mainFilename: string) {
                 await once(socket, 'secureConnect');
             }
             catch (e) {
+                console.warn('Cluster server tls failed.', host, port, e);
                 continue;
            }
 
@@ -164,7 +258,7 @@ export function startClusterClient(mainFilename: string) {
                 const auth: ClusterObject = {
                     address: socket.localAddress,
                     port: socket.localPort,
-                    id: process.env.SCRYPTED_CLUSTER_CLIENT_NAME || os.hostname(),
+                    id: process.env.SCRYPTED_CLUSTER_WORKER_NAME || os.hostname(),
                     proxyId: undefined,
                     sourceKey: undefined,
                     sha256: undefined,
@@ -180,85 +274,7 @@ export function startClusterClient(mainFilename: string) {
             const clusterPeerSetup = setupCluster(peer);
             await clusterPeerSetup.initializeCluster({ clusterId, clusterSecret });
 
-            const clusterForkParam: ClusterForkParam = async (runtime, runtimeWorkerOptions, peerLiveness, getZip) => {
-                let runtimeWorker: RuntimeWorker;
-
-                const builtins = getBuiltinRuntimeHosts();
-                const rt = builtins.get(runtime);
-                if (!rt)
-                    throw new Error('unknown runtime ' + runtime);
-
-                const pluginId: string = runtimeWorkerOptions.packageJson.name;
-                const { zipFile, unzippedPath } = await prepareZip(getPluginVolume(pluginId), runtimeWorkerOptions.zipHash, getZip);
-
-                const volume = getScryptedVolume();
-                const pluginVolume = getPluginVolume(pluginId);
-
-                runtimeWorkerOptions.zipFile = zipFile;
-                runtimeWorkerOptions.unzippedPath = unzippedPath;
-
-                runtimeWorkerOptions.env = {
-                    ...runtimeWorkerOptions.env,
-                    SCRYPTED_VOLUME: volume,
-                    SCRYPTED_PLUGIN_VOLUME: pluginVolume,
-                };
-
-                runtimeWorker = rt(mainFilename, runtimeWorkerOptions, undefined);
-                runtimeWorker.stdout.on('data', data => console.log(data.toString()));
-                runtimeWorker.stderr.on('data', data => console.error(data.toString()));
-
-                const threadPeer = new RpcPeer('main', 'thread', (message, reject, serializationContext) => runtimeWorker.send(message, reject, serializationContext));
-                runtimeWorker.setupRpcPeer(threadPeer);
-                runtimeWorker.on('exit', () => {
-                    threadPeer.kill('worker exited');
-                });
-                runtimeWorker.on('error', e => {
-                    threadPeer.kill('worker error ' + e);
-                });
-                threadPeer.killedSafe.finally(() => {
-                    runtimeWorker.kill();
-                });
-                peerLiveness.waitKilled().catch(() => { }).finally(() => {
-                    threadPeer.kill('peer killed');
-                });
-                let getRemote: any;
-                let ping: any;
-                try {
-                    const initializeCluster: InitializeCluster = await threadPeer.getParam('initializeCluster');
-                    await initializeCluster({ clusterId, clusterSecret });
-                    getRemote = await threadPeer.getParam('getRemote');
-                    ping = await threadPeer.getParam('ping');
-                }
-                catch (e) {
-                    threadPeer.kill('cluster fork failed');
-                    throw e;
-                }
-
-                const readStream = async function* (stream: Readable) {
-                    for await (const buffer of stream) {
-                        yield buffer;
-                    }
-                }
-
-                const timeout = setTimeout(() => {
-                    threadPeer.kill('cluster fork timeout');
-                }, 10000);
-                const clusterGetRemote = (...args: any[]) => {
-                    clearTimeout(timeout);
-                    return {
-                        [RpcPeer.PROPERTY_JSON_COPY_SERIALIZE_CHILDREN]: true,
-                        stdout: readStream(runtimeWorker.stdout),
-                        stderr: readStream(runtimeWorker.stderr),
-                        getRemote,
-                        ping,
-                    };
-                };
-
-                const result = new ClusterForkResult(threadPeer, threadPeer.killed, clusterGetRemote);
-                return result;
-            };
-
-            peer.params['fork'] = clusterForkParam;
+            peer.params['fork'] = createClusterForkParam(mainFilename, clusterId, clusterSecret);
 
             await peer.killed;
         }
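
Note: this hunk and the createClusterForkParam hunk further up are two halves of the same refactor. The fork handler that used to be defined inline in the cluster client loop is now produced by createClusterForkParam(), registered here as peer.params['fork'] and, in the createClusterServer hunk below, pre-resolved as the fork of the server's own RunningClusterWorker entry, so the server can satisfy cluster forks without a remote client. A rough sketch of a dispatcher consuming either kind of entry; the selection strategy, import paths, and how client entries get their fork populated are assumptions, not part of the diff:

    import type {
        ClusterForkParam, ClusterForkResultInterface,
        PeerLiveness, RunningClusterWorker,
    } from './src/scrypted-cluster-main';
    import type { RuntimeWorkerOptions } from './src/plugin/runtime/runtime-worker';

    // Sketch only: not part of the diff.
    async function dispatchFork(
        workers: Map<string, RunningClusterWorker>,
        runtime: string,
        options: RuntimeWorkerOptions,
        liveness: PeerLiveness,
        getZip: () => Promise<Buffer>,
    ): Promise<ClusterForkResultInterface> {
        // Real selection would weigh matchesClusterLabels scores; taking the first worker keeps the sketch short.
        const worker = [...workers.values()][0];
        // Server entry: fork resolves to createClusterForkParam(...).
        // Client entries: fork is presumably filled in by the cluster-fork service (not shown in this diff).
        const forkParam: ClusterForkParam = await worker.fork;
        return forkParam(runtime, options, liveness, getZip);
    }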
@@ -274,7 +290,20 @@ export function startClusterClient(mainFilename: string) {
    })();
 }
 
-export function createClusterServer(runtime: ScryptedRuntime, certificate: ReturnType<typeof createSelfSignedCertificate>) {
+export function createClusterServer(mainFilename: string, scryptedRuntime: ScryptedRuntime, certificate: ReturnType<typeof createSelfSignedCertificate>) {
+    const serverClusterWorkerId = crypto.randomUUID();
+    process.env.SCRYPTED_CLUSTER_WORKER_ID = serverClusterWorkerId;
+    const serverWorker: RunningClusterWorker = {
+        labels: getClusterLabels(),
+        id: serverClusterWorkerId,
+        peer: undefined,
+        fork: Promise.resolve(createClusterForkParam(mainFilename, scryptedRuntime.clusterId, scryptedRuntime.clusterSecret)),
+        name: process.env.SCRYPTED_CLUSTER_WORKER_NAME || os.hostname(),
+        address: process.env.SCRYPTED_CLUSTER_ADDRESS,
+        forks: new Set(),
+    };
+    scryptedRuntime.clusterWorkers.set(serverClusterWorkerId, serverWorker);
+
     const server = tls.createServer({
         key: certificate.serviceKey,
         cert: certificate.certificate,
@@ -289,7 +318,7 @@ export function createClusterServer(runtime: ScryptedRuntime, certificate: Retur
     const connectForkWorker: ConnectForkWorker = async (auth: ClusterObject, properties: ClusterWorkerProperties) => {
         const id = crypto.randomUUID();
         try {
-            const sha256 = computeClusterObjectHash(auth, runtime.clusterSecret);
+            const sha256 = computeClusterObjectHash(auth, scryptedRuntime.clusterSecret);
             if (sha256 !== auth.sha256)
                 throw new Error('cluster object hash mismatch');
 
@@ -303,17 +332,19 @@ export function createClusterServer(runtime: ScryptedRuntime, certificate: Retur
             }
             const worker: RunningClusterWorker = {
                 ...properties,
-                // generate a random uuid.
                 id,
                 peer,
+                fork: undefined,
+                name: auth.id,
+                address: socket.remoteAddress,
                 forks: new Set(),
             };
-            runtime.clusterWorkers.set(id, worker);
+            scryptedRuntime.clusterWorkers.set(id, worker);
             peer.killedSafe.finally(() => {
-                runtime.clusterWorkers.delete(id);
+                scryptedRuntime.clusterWorkers.delete(id);
             });
             socket.on('close', () => {
-                runtime.clusterWorkers.delete(id);
+                scryptedRuntime.clusterWorkers.delete(id);
             });
             console.log('Cluster client authenticated.', socket.remoteAddress, socket.remotePort, properties);
         }
@@ -323,7 +354,7 @@ export function createClusterServer(runtime: ScryptedRuntime, certificate: Retur
         }
 
         return {
-            clusterId: runtime.clusterId,
+            clusterId: scryptedRuntime.clusterId,
            clusterWorkerId: id,
         }
     }
@@ -334,7 +365,8 @@ export function createClusterServer(runtime: ScryptedRuntime, certificate: Retur
 }
 
 export class ClusterManagerImpl implements ClusterManager {
-    private clusterServicePromise: Promise<ClusterForkService>;
+    private clusterServicePromise: Promise<ClusterForkService>;
+    private clusterMode = getScryptedClusterMode()[0];
 
     constructor(private api: PluginAPI) {
     }
@@ -344,7 +376,7 @@ export class ClusterManagerImpl implements ClusterManager {
     }
 
     getClusterMode(): 'server' | 'client' | undefined {
-        return getScryptedClusterMode()[0];
+        return this.clusterMode;
     }
 
     async getClusterWorkers(): Promise<Record<string, ClusterWorker>> {