multivisor 6.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. multivisor/__init__.py +0 -0
  2. multivisor/client/__init__.py +0 -0
  3. multivisor/client/cli.py +29 -0
  4. multivisor/client/http.py +93 -0
  5. multivisor/client/repl.py +244 -0
  6. multivisor/client/util.py +58 -0
  7. multivisor/multivisor.py +470 -0
  8. multivisor/rpc.py +232 -0
  9. multivisor/server/__init__.py +0 -0
  10. multivisor/server/dist/index.html +1 -0
  11. multivisor/server/dist/static/css/app.2aff3580a128440bba89b94112292cb1.css +6 -0
  12. multivisor/server/dist/static/css/app.2aff3580a128440bba89b94112292cb1.css.map +1 -0
  13. multivisor/server/dist/static/js/app.52791f915c2f060b9cb1.js +2 -0
  14. multivisor/server/dist/static/js/app.52791f915c2f060b9cb1.js.map +1 -0
  15. multivisor/server/dist/static/js/manifest.2ae2e69a05c33dfc65f8.js +2 -0
  16. multivisor/server/dist/static/js/manifest.2ae2e69a05c33dfc65f8.js.map +1 -0
  17. multivisor/server/dist/static/js/vendor.1d02877727062a41e9fb.js +1319 -0
  18. multivisor/server/dist/static/js/vendor.1d02877727062a41e9fb.js.map +1 -0
  19. multivisor/server/rpc.py +156 -0
  20. multivisor/server/tests/__init__.py +0 -0
  21. multivisor/server/tests/conftest.py +1 -0
  22. multivisor/server/tests/test_web.py +194 -0
  23. multivisor/server/util.py +70 -0
  24. multivisor/server/web.py +327 -0
  25. multivisor/signals.py +5 -0
  26. multivisor/tests/__init__.py +0 -0
  27. multivisor/tests/test_multivisor.py +179 -0
  28. multivisor/util.py +75 -0
  29. multivisor-6.0.2.dist-info/METADATA +375 -0
  30. multivisor-6.0.2.dist-info/RECORD +34 -0
  31. multivisor-6.0.2.dist-info/WHEEL +5 -0
  32. multivisor-6.0.2.dist-info/entry_points.txt +4 -0
  33. multivisor-6.0.2.dist-info/licenses/LICENSE +674 -0
  34. multivisor-6.0.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,470 @@
1
+ #!/usr/bin/env python
2
+ import copy
3
+ import logging
4
+ import os
5
+ import time
6
+ import weakref
7
+
8
+ from blinker import signal
9
+
10
+ try:
11
+ from ConfigParser import SafeConfigParser as ConfigParser
12
+ except ImportError:
13
+ from configparser import ConfigParser
14
+
15
+ import zerorpc
16
+ from gevent import spawn, sleep, joinall
17
+ from supervisor.xmlrpc import Faults
18
+ from supervisor.states import RUNNING_STATES
19
+
20
+ from .util import sanitize_url, filter_patterns, parse_dict
21
+
22
+ log = logging.getLogger("multivisor")
23
+
24
+
25
class Supervisor(dict):
    """Live, client-side mirror of one remote supervisord instance.

    The dict content holds the last known remote state (identification,
    versions, pid, running flag and a ``processes`` map of uid -> Process).
    A dedicated gevent greenlet (:meth:`run`) keeps the state in sync by
    listening to the remote 0RPC event stream.
    """

    # Baseline state used before the first connection and whenever the
    # remote supervisor cannot be reached.
    Null = {
        "identification": None,
        "api_version": None,
        "version": None,
        "supervisor_version": None,
        "processes": {},
        "running": False,
        "pid": None,
    }

    def __init__(self, name, url):
        super(Supervisor, self).__init__(self.Null)
        self.name = self["name"] = name
        self.url = self["url"] = url
        self.log = log.getChild(name)
        addr = sanitize_url(url, protocol="tcp", host=name, port=9002)
        self.address = addr["url"]
        self.host = self["host"] = addr["host"]
        self.server = zerorpc.Client(self.address)
        # fill supervisor info before events start coming in
        self.event_loop = spawn(self.run)

    def __repr__(self):
        return "{}(name={})".format(self.__class__.__name__, self.name)

    def __eq__(self, other):
        # Compare supervisor-level state; for processes only the set of
        # uids matters here (per-process changes are detected separately).
        this, other = dict(self), dict(other)
        this_p = this.pop("processes")
        other_p = other.pop("processes")
        return this == other and list(this_p.keys()) == list(other_p.keys())

    def run(self):
        """Greenlet body: (re)connect, refresh, then dispatch events.

        Reconnection attempts are throttled to at most one every 10s.
        """
        last_retry = time.time()
        while True:
            try:
                self.log.info("(re)initializing...")
                self.refresh()
                for i, event in enumerate(self.server.event_stream()):
                    # ignore first event. It serves only to trigger
                    # connection and avoid TimeoutExpired
                    if i != 0:
                        self.handle_event(event)
            except zerorpc.LostRemote:
                self.log.info("Lost remote")
            except zerorpc.TimeoutExpired:
                self.log.info("Timeout expired")
            except Exception as err:
                self.log.warning("Unexpected error %r", err)
            finally:
                # throttle reconnection attempts
                curr_time = time.time()
                delta = curr_time - last_retry
                if delta < 10:
                    sleep(10 - delta)
                last_retry = time.time()

    def handle_event(self, event):
        """Dispatch one remote event: full refresh or per-process update."""
        name = event["eventname"]
        self.log.info("handling %s...", name)
        if name.startswith("SUPERVISOR_STATE"):
            self.refresh()
        elif not self["running"]:
            # we were marked down: any event means the remote is back
            self.refresh()
        elif name.startswith("PROCESS_GROUP"):
            self.refresh()
        elif name.startswith("PROCESS_STATE"):
            payload = event["payload"]
            puid = "{}:{}:{}".format(
                self.name, payload["groupname"], payload["processname"]
            )
            self["processes"][puid].handle_event(event)

    def create_base_info(self):
        """Return a fresh Null-based snapshot tagged with identity fields."""
        return dict(self.Null, name=self.name, url=self.url, host=self.host)

    def read_info(self):
        """Query the remote server and build a full state snapshot dict."""
        info = self.create_base_info()
        server = self.server
        info["pid"] = server.getPID()
        info["running"] = True
        info["identification"] = server.getIdentification()
        info["api_version"] = server.getAPIVersion()
        info["supervisor_version"] = server.getSupervisorVersion()
        info["processes"] = processes = {}
        procInfo = server.getAllProcessInfo()
        for proc in procInfo:
            process = Process(self, parse_dict(proc))
            processes[process["uid"]] = process
        return info

    def update_info(self, info):
        """Merge a snapshot into self, emitting change notifications."""
        info = parse_dict(info)
        if self == info:
            # same supervisor-level state: check individual processes
            this_p, info_p = self["processes"], info["processes"]
            if this_p != info_p:
                for name, process in info_p.items():
                    if process != this_p[name]:
                        send(process, "process_changed")
            self.update(info)
        else:
            self.update(info)
            send(self, "supervisor_changed")

    def refresh(self):
        """Re-read remote state; fall back to Null info if unreachable."""
        try:
            info = self.read_info()
        except:
            info = self.create_base_info()
            raise
        finally:
            # always publish whatever we got (even the Null fallback)
            self.update_info(info)

    def update_server(self, group_names=()):
        """Re-apply remote configuration (like supervisorctl ``update``).

        :param group_names: optional subset of process groups to act on
        """
        server = self.server
        try:
            added, changed, removed = server.reloadConfig()[0]
        except zerorpc.RemoteError as rerr:
            error(rerr.msg)
            return

        # If any gnames are specified we need to verify that they are
        # valid in order to print a useful error message.
        if group_names:
            groups = set()
            for info in server.getAllProcessInfo():
                groups.add(info["group"])
            # New gnames would not currently exist in this set so
            # add those as well.
            groups.update(added)

            for gname in group_names:
                if gname not in groups:
                    self.log.debug("unknown group %s", gname)

        for gname in removed:
            if group_names and gname not in group_names:
                continue
            results = server.stopProcessGroup(gname)
            self.log.debug("stopped process group %s", gname)

            fails = [res for res in results if res["status"] == Faults.FAILED]
            if fails:
                self.log.debug("%s has problems; not removing", gname)
                continue
            server.removeProcessGroup(gname)
            self.log.debug("removed process group %s", gname)

        for gname in changed:
            if group_names and gname not in group_names:
                continue
            server.stopProcessGroup(gname)
            self.log.debug("stopped process group %s", gname)

            server.removeProcessGroup(gname)
            server.addProcessGroup(gname)
            self.log.debug("updated process group %s", gname)

        for gname in added:
            if group_names and gname not in group_names:
                continue
            server.addProcessGroup(gname)
            self.log.debug("added process group %s", gname)

        self.log.info("Updated %s", self.name)

    def _reread(self):
        # raw reloadConfig; raises zerorpc.RemoteError on bad config
        return self.server.reloadConfig()

    def restart(self):
        """Restart the whole remote supervisord after validating config."""
        # do a reread. If there is an error (bad config) inform the user and
        # refuse to restart
        try:
            self._reread()
        except zerorpc.RemoteError as rerr:
            error("Cannot restart: {}".format(rerr.msg))
            return
        result = self.server.restart(timeout=30)
        if result:
            info("Restarted {}".format(self.name))
        else:
            error("Error restarting {}".format(self.name))

    def reread(self):
        """Reload remote config and notify how many groups changed."""
        try:
            added, changed, removed = self._reread()[0]
        except zerorpc.RemoteError as rerr:
            error(rerr.msg)
        else:
            info(
                "Reread config of {} "
                "({} added; {} changed; {} disappeared)".format(
                    self.name, len(added), len(changed), len(removed)
                )
            )

    def shutdown(self):
        """Ask the remote supervisord to shut itself down."""
        result = self.server.shutdown()
        if result:
            info("Shut down {}".format(self.name))
        else:
            error("Error shutting down {}".format(self.name))
227
+
228
+
229
+ class Process(dict):
230
+
231
+ Null = {"running": False, "pid": None, "state": None, "statename": "UNKNOWN"}
232
+
233
+ def __init__(self, supervisor, *args, **kwargs):
234
+ super(Process, self).__init__(self.Null)
235
+ if args:
236
+ self.update(args[0])
237
+ self.update(kwargs)
238
+ supervisor_name = supervisor["name"]
239
+ full_name = self.get("group", "") + ":" + self.get("name", "")
240
+ uid = "{}:{}".format(supervisor_name, full_name)
241
+ self.log = log.getChild(uid)
242
+ self.supervisor = weakref.proxy(supervisor)
243
+ self["full_name"] = full_name
244
+ self["running"] = self["state"] in RUNNING_STATES
245
+ self["supervisor"] = supervisor_name
246
+ self["host"] = supervisor["host"]
247
+ self["uid"] = uid
248
+
249
+ @property
250
+ def server(self):
251
+ return self.supervisor.server
252
+
253
+ @property
254
+ def full_name(self):
255
+ return self["full_name"]
256
+
257
+ def handle_event(self, event):
258
+ event_name = event["eventname"]
259
+ if event_name.startswith("PROCESS_STATE"):
260
+ payload = event["payload"]
261
+ proc_info = payload.get("process")
262
+ if proc_info is not None:
263
+ proc_info = parse_dict(proc_info)
264
+ old = self.update_info(proc_info)
265
+ if old != self:
266
+ old_state, new_state = old["statename"], self["statename"]
267
+ send(self, event="process_changed")
268
+ if old_state != new_state:
269
+ info(
270
+ "{} changed from {} to {}".format(
271
+ self, old_state, new_state
272
+ )
273
+ )
274
+
275
+ def read_info(self):
276
+ proc_info = dict(self.Null)
277
+ try:
278
+ from_serv = parse_dict(self.server.getProcessInfo(self.full_name))
279
+ proc_info.update(from_serv)
280
+ except Exception as err:
281
+ self.log.warn("Failed to read info from %s: %s", self["uid"], err)
282
+ return proc_info
283
+
284
+ def update_info(self, proc_info):
285
+ old = dict(self)
286
+ proc_info["running"] = proc_info["state"] in RUNNING_STATES
287
+ self.update(proc_info)
288
+ return old
289
+
290
+ def refresh(self):
291
+ proc_info = self.read_info()
292
+ proc_info = parse_dict(proc_info)
293
+ self.update_info(proc_info)
294
+
295
+ def start(self):
296
+ try:
297
+ self.server.startProcess(self.full_name, False, timeout=30)
298
+ except:
299
+ message = "Error trying to start {}!".format(self)
300
+ error(message)
301
+ self.log.exception(message)
302
+
303
+ def stop(self):
304
+ try:
305
+ self.server.stopProcess(self.full_name)
306
+ except:
307
+ message = "Failed to stop {}".format(self["uid"])
308
+ warning(message)
309
+ self.log.exception(message)
310
+
311
+ def restart(self):
312
+ if self["running"]:
313
+ self.stop()
314
+ self.start()
315
+
316
+ def __str__(self):
317
+ return "{0} on {1}".format(self["name"], self["supervisor"])
318
+
319
+ def __eq__(self, proc):
320
+ p1, p2 = dict(self), dict(proc)
321
+ p1.pop("description")
322
+ p1.pop("now")
323
+ p2.pop("description")
324
+ p2.pop("now")
325
+ return p1 == p2
326
+
327
+ def __ne__(self, proc):
328
+ return not self == proc
329
+
330
+
331
+ # Configuration
332
+
333
+
334
def load_config(config_file):
    """Parse a multivisor INI configuration file.

    Returns a dict with the ``[global]`` options (default name
    "multivisor") plus a ``supervisors`` map of name -> Supervisor built
    from every ``[supervisor:<name>]`` section.
    """
    parser = ConfigParser()
    parser.read(config_file)
    dft_global = dict(name="multivisor")

    supervisors = {}
    config = dict(dft_global, supervisors=supervisors)
    config.update(parser.items("global"))
    for section in parser.sections():
        if not section.startswith("supervisor:"):
            continue
        name = section[len("supervisor:") :]
        section_items = dict(parser.items(section))
        url = section_items.get("url", "")
        # creating a Supervisor immediately spawns its event-loop greenlet
        supervisors[name] = Supervisor(name, url)
    return config
351
+
352
+
353
def send(payload, event):
    """Emit *payload* on the blinker signal named *event*.

    Returns whatever the signal's ``send`` returns (list of receiver
    results).
    """
    return signal(event).send(event, payload=payload)
356
+
357
+
358
def notification(message, level):
    """Broadcast a timestamped notification event to all listeners."""
    send({"message": message, "level": level, "time": time.time()}, "notification")
361
+
362
+
363
def info(message):
    """Broadcast *message* as an INFO-level notification."""
    notification(message, "INFO")
365
+
366
+
367
def warning(message):
    """Log *message* locally and broadcast a WARNING notification."""
    # use the module logger ("multivisor") rather than the root logger,
    # consistent with the rest of this file
    log.warning(message)
    notification(message, "WARNING")
370
+
371
+
372
def error(message):
    """Log *message* locally and broadcast an ERROR notification."""
    # use the module logger ("multivisor") rather than the root logger,
    # consistent with the rest of this file
    log.error(message)
    notification(message, "ERROR")
375
+
376
+
377
class Multivisor(object):
    """Facade aggregating all configured Supervisor clients.

    Configuration is loaded lazily from ``options.config_file`` and can
    be re-read with :meth:`reload`. Fan-out operations run concurrently
    on gevent greenlets.
    """

    def __init__(self, options):
        # options must expose a ``config_file`` attribute
        self.options = options
        self.reload()

    @property
    def config(self):
        """Current configuration dict, loaded on first access."""
        if self._config is None:
            self._config = load_config(self.options.config_file)
        return self._config

    @property
    def safe_config(self):
        """
        :return: config dict without username and password
        """
        if not self.use_authentication:
            return self.config

        # shallow copy is enough: only top-level keys are dropped
        config = copy.copy(self.config)
        config.pop("username", "")
        config.pop("password", "")
        return config

    @property
    def config_file_content(self):
        """Raw text of the configuration file."""
        with open(self.options.config_file) as config_file:
            return config_file.read()

    def reload(self):
        """Drop the cached config and load it again from disk."""
        self._config = None
        return self.config

    @property
    def supervisors(self):
        """Map of supervisor name -> Supervisor."""
        return self.config["supervisors"]

    @property
    def processes(self):
        """Flat map of process uid -> Process across all supervisors."""
        procs = (svisor["processes"] for svisor in self.supervisors.values())
        return {puid: proc for sprocs in procs for puid, proc in sprocs.items()}

    @property
    def use_authentication(self):
        """
        :return: whether authentication should be used
        """
        username = self.config.get("username", False)
        password = self.config.get("password", False)
        return bool(username and password)

    @property
    def secret_key(self):
        # NOTE(review): presumably consumed by the web layer for session
        # signing — confirm against multivisor/server/web.py
        return os.environ.get("MULTIVISOR_SECRET_KEY")

    def refresh(self):
        """Refresh all supervisors concurrently and wait for completion."""
        tasks = [spawn(supervisor.refresh) for supervisor in self.supervisors.values()]
        joinall(tasks)

    def get_supervisor(self, name):
        """Return the Supervisor with the given name (KeyError if absent)."""
        return self.supervisors[name]

    def get_process(self, uid):
        """Return the Process for ``<supervisor>:<group>:<name>`` uid."""
        supervisor, _ = uid.split(":", 1)
        return self.supervisors[supervisor]["processes"][uid]

    def _do_supervisors(self, operation, *names):
        # run one operation per named supervisor, concurrently
        supervisors = (self.get_supervisor(name) for name in names)
        tasks = [spawn(operation, supervisor) for supervisor in supervisors]
        joinall(tasks)

    def _do_processes(self, operation, *patterns):
        # run one operation per process matching any pattern, concurrently
        procs = self.processes
        puids = filter_patterns(procs, patterns)
        tasks = [spawn(operation, procs[puid]) for puid in puids]
        joinall(tasks)

    def update_supervisors(self, *names):
        """Apply config changes on the named supervisors."""
        self._do_supervisors(Supervisor.update_server, *names)

    def restart_supervisors(self, *names):
        """Restart the named supervisord instances."""
        self._do_supervisors(Supervisor.restart, *names)

    def reread_supervisors(self, *names):
        """Re-read config on the named supervisors (no restart)."""
        self._do_supervisors(Supervisor.reread, *names)

    def shutdown_supervisors(self, *names):
        """Shut down the named supervisord instances."""
        self._do_supervisors(Supervisor.shutdown, *names)

    def restart_processes(self, *patterns):
        """Restart all processes whose uid matches any pattern."""
        self._do_processes(Process.restart, *patterns)

    def stop_processes(self, *patterns):
        """Stop all processes whose uid matches any pattern."""
        self._do_processes(Process.stop, *patterns)
multivisor/rpc.py ADDED
@@ -0,0 +1,232 @@
1
+ """
2
+ An extension to the standard supervisor RPC interface which subscribes
3
+ to internal supervisor events and dispatches them to 0RPC.
4
+
5
+ disadvantages: it depends on supervisor internal supervisor.events.subscribe
6
+ interface so its usage is quite risky.
7
+ advantages: it avoids creating an eventlistener process just to forward events.
8
+
9
+ The python environment where supervisor runs must have multivisor installed
10
+ """
11
+
12
+ import os
13
+ import queue
14
+ import logging
15
+ import functools
16
+ import threading
17
+
18
+ from gevent import spawn, hub, sleep
19
+ from gevent.queue import Queue
20
+ from six import text_type
21
+ from zerorpc import stream, Server, LostRemote, Context
22
+
23
+ from supervisor.http import NOT_DONE_YET
24
+ from supervisor.rpcinterface import SupervisorNamespaceRPCInterface
25
+ from supervisor.events import subscribe, Event, getEventNameByType
26
+
27
+ # unsubscribe only appears in supervisor > 3.3.4
28
+ try:
29
+ from supervisor.events import unsubscribe
30
+ except:
31
+ unsubscribe = lambda x, y: None
32
+
33
+ from .util import sanitize_url, parse_obj
34
+
35
+ DEFAULT_BIND = "tcp://*:9002"
36
+
37
+
38
def sync(klass):
    """Class decorator: make supervisor's deferred results synchronous.

    Every public method is wrapped so that, when supervisor returns a
    callable deferred (its NOT_DONE_YET protocol), the wrapper polls it
    until a real value is produced. ``event_stream`` is excluded because
    it is a genuine 0RPC stream, and dunder/private names are skipped.
    """

    def wrap_func(meth):
        @functools.wraps(meth)
        def wrapper(*args, **kwargs):
            # args[0] is the RPC interface instance (self)
            args[0]._log.debug("0RPC: called {}".format(meth.__name__))
            result = meth(*args, **kwargs)
            if callable(result):
                # supervisor deferred: poll until a real value appears
                r = NOT_DONE_YET
                while r is NOT_DONE_YET:
                    sleep(0.1)
                    r = result()
                result = r
            return result

        return wrapper

    for name in dir(klass):
        if name.startswith("_") or name == "event_stream":
            continue
        meth = getattr(klass, name)
        if not callable(meth):
            continue
        setattr(klass, name, wrap_func(meth))
    return klass
62
+
63
+
64
+ # When supervisor is asked to restart, it closes file descriptors
65
+ # from 5..1024. Since we are not able to restart the ZeroRPC server
66
+ # (see https://github.com/0rpc/zerorpc-python/issues/208) this patch
67
+ # prevents supervisor from closing the gevent pipes and 0MQ sockets
68
+ # This is a really agressive move but seems to work until the above
69
+ # bug is solved
70
+ from supervisor.options import ServerOptions
71
+
72
+ ServerOptions.cleanup_fds = lambda options: None
73
+
74
+
75
@sync
class MultivisorNamespaceRPCInterface(SupervisorNamespaceRPCInterface):
    """Supervisor RPC namespace extended with a 0RPC event stream.

    Runs inside the supervisord process. The first supervisor event
    lazily starts a 0RPC server on a background thread; subsequent
    events are forwarded from the main thread to that thread through a
    queue plus a gevent async watcher, then broadcast to every connected
    streaming client.
    """

    def __init__(self, supervisord, bind):
        SupervisorNamespaceRPCInterface.__init__(self, supervisord)
        self._bind = bind
        # main thread -> 0RPC server thread event hand-off
        self._channel = queue.Queue()
        # one gevent Queue per connected event_stream client
        self._event_channels = set()
        self._server = None
        self._watcher = None
        self._shutting_down = False
        self._log = logging.getLogger("MVRPC")

    def _start(self):
        # receive every supervisor event type
        subscribe(Event, self._handle_event)

    def _stop(self):
        unsubscribe(Event, self._handle_event)
        self._shutting_down = True

    def _shutdown(self):
        # disconnect all streams
        for channel in self._event_channels:
            channel.put(None)
        if self._server is not None:
            self._server.stop()
            self._server.close()

    def _process_event(self, event):
        """Translate one supervisor event to a dict and broadcast it."""
        if self._shutting_down:
            return
        event_name = getEventNameByType(event.__class__)
        stop_event = event_name == "SUPERVISOR_STATE_CHANGE_STOPPING"
        if stop_event:
            self._log.info("supervisor stopping!")
            self._stop()
        elif event_name.startswith("TICK"):
            # heartbeat events are of no interest to clients
            return
        try:
            payload_str = text_type(event.payload())
        except AttributeError:
            # old supervisor version
            payload_str = text_type(event)
        # payload is a space-separated list of key:value pairs
        payload = dict((x.split(":") for x in payload_str.split()))
        if event_name.startswith("PROCESS_STATE"):
            pname = "{}:{}".format(payload["groupname"], payload["processname"])
            payload[u"process"] = parse_obj(self.getProcessInfo(pname))
        # broadcast the event to clients
        server = self.supervisord.options.identifier
        new_event = {
            u"pool": u"multivisor",
            u"server": text_type(server),
            u"eventname": text_type(event_name),
            u"payload": payload,
        }
        for channel in self._event_channels:
            channel.put(new_event)
        if stop_event:
            self._shutdown()

    # called on 0RPC server thread
    def _dispatch_event(self):
        # drain everything the main thread queued since the last wake-up
        while not self._channel.empty():
            event = self._channel.get()
            self._process_event(event)

    # called on main thread
    def _handle_event(self, event):
        if self._server is None:
            # first event ever: lazily start the 0RPC server thread
            reply = start_rpc_server(self, self._bind)
            if isinstance(reply, Exception):
                self._log.critical("severe 0RPC error: %s", reply)
                self._stop()
                self._shutdown()
                return
            self._server, self._watcher = reply
        self._channel.put(event)
        # wake the server thread's gevent loop so it drains the queue
        self._watcher.send()

        # handle stop synchronously
        event_name = getEventNameByType(event.__class__)
        if event_name == "SUPERVISOR_STATE_CHANGE_STOPPING":
            # block until the server thread has fully torn down
            self._server._stop_event.wait()
            self._server = None
            self._watcher = None

    @stream
    def event_stream(self):
        """0RPC streaming endpoint: yield events until disconnect.

        ``None`` on the channel is the shutdown sentinel put by
        :meth:`_shutdown`.
        """
        self._log.info("client connected to stream")
        channel = Queue()
        self._event_channels.add(channel)
        try:
            yield "First event to trigger connection. Please ignore me!"
            for event in channel:
                if event is None:
                    self._log.info("stop: closing client")
                    return
                # self._log.info(event)
                yield event
        except LostRemote as e:
            self._log.info("remote end of stream disconnected")
        finally:
            self._event_channels.remove(channel)
177
+
178
+
179
class ServerMiddleware(object):
    """zerorpc server middleware normalizing reply payloads.

    After each call, the reply arguments are passed through
    ``parse_obj`` so clients receive plain, serializable values.
    """

    def server_after_exec(self, request_event, reply_event):
        args = reply_event.args
        if args:
            reply_event._args = parse_obj(args)
183
+
184
+
185
def start_rpc_server(multivisor, bind):
    """Launch the 0RPC server on its own daemon thread.

    Blocks until the thread reports back and returns either a
    ``(server, watcher)`` pair or the startup Exception.
    """
    result_channel = queue.Queue(1)
    server_thread = threading.Thread(
        target=run_rpc_server,
        name="RPCServer",
        args=(multivisor, bind, result_channel),
        daemon=True,
    )
    server_thread.start()
    return result_channel.get()
193
+
194
+
195
def run_rpc_server(multivisor, bind, future_server):
    """Thread target: run the 0RPC server until supervisor stops.

    Reports either ``(server, watcher)`` or the startup exception back
    through *future_server* so the spawning thread can react.
    """
    multivisor._log.info("0RPC: spawn server on {}...".format(os.getpid()))
    # async watcher lets the main thread wake this thread's gevent loop
    watcher = hub.get_hub().loop.async_()
    stop_event = threading.Event()
    watcher.start(lambda: spawn(multivisor._dispatch_event))
    server = None
    try:
        context = Context()
        context.register_middleware(ServerMiddleware())
        server = Server(multivisor, context=context)
        # _stop_event lets _handle_event wait for full teardown
        server._stop_event = stop_event
        server.bind(bind)
        future_server.put((server, watcher))
        multivisor._log.info("0RPC: server running!")
        server.run()
        multivisor._log.info("0RPC: server stopped!")
    except Exception as err:
        # hand the failure back to the caller waiting on the queue
        future_server.put(err)
    finally:
        watcher.stop()
        del server
        # prevent reusage of this loop because supervisor closes all ports
        # when a restart happens. It actually doesn't help preventing a crash
        hub.get_hub().destroy(destroy_loop=True)
        multivisor._log.info("0RPC: server thread destroyed!")
        stop_event.set()
221
+
222
+
223
def make_rpc_interface(supervisord, bind=DEFAULT_BIND):
    """Factory referenced from supervisord's rpcinterface configuration.

    Normalizes *bind* to a tcp URL, builds the extended RPC interface
    and subscribes it to supervisor events.
    """
    # To enable python standard logging here, call logging.basicConfig
    # with the desired level/format before constructing the interface.
    addr = sanitize_url(bind, protocol="tcp", host="*", port=9002)
    interface = MultivisorNamespaceRPCInterface(supervisord, addr["url"])
    interface._start()
    return interface
File without changes
@@ -0,0 +1 @@
1
+ <!DOCTYPE html><html><head><meta charset=utf-8><meta name=viewport content="width=device-width,initial-scale=1"><title>multivisor</title><link href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700|Material+Icons" rel=stylesheet><link href=/static/css/app.2aff3580a128440bba89b94112292cb1.css rel=stylesheet></head><body><div id=app></div><script type=text/javascript src=/static/js/manifest.2ae2e69a05c33dfc65f8.js></script><script type=text/javascript src=/static/js/vendor.1d02877727062a41e9fb.js></script><script type=text/javascript src=/static/js/app.52791f915c2f060b9cb1.js></script></body></html>