ominfra 0.0.0.dev119__py3-none-any.whl → 0.0.0.dev121__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -161,6 +161,19 @@ def close_fd(fd: int) -> bool:
     return True
 
 
+def is_fd_open(fd: int) -> bool:
+    try:
+        n = os.dup(fd)
+    except OSError:
+        return False
+    os.close(n)
+    return True
+
+
+def get_open_fds(limit: int) -> ta.FrozenSet[int]:
+    return frozenset(filter(is_fd_open, range(limit)))
+
+
 def mktempfile(suffix: str, prefix: str, dir: str) -> str:  # noqa
     fd, filename = tempfile.mkstemp(suffix, prefix, dir)
     os.close(fd)
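
For reference, a minimal standalone sketch of how the new fd probe behaves (standard library only; the 64 scanned below is an arbitrary illustrative limit, not a value used by the package):

    import os

    # os.dup() raises OSError (EBADF) for a descriptor that is not open, so
    # duplicating and immediately closing the copy is a cheap open-ness test.
    def is_fd_open(fd: int) -> bool:
        try:
            n = os.dup(fd)
        except OSError:
            return False
        os.close(n)
        return True

    # Scanning a small range usually yields at least {0, 1, 2}.
    print(sorted(fd for fd in range(64) if is_fd_open(fd)))
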
@@ -5,6 +5,8 @@ import signal
 import tempfile
 import typing as ta
 
+from ..configs import ConfigMapping
+from ..configs import build_config_named_children
 from .datatypes import byte_size
 from .datatypes import existing_directory
 from .datatypes import existing_dirpath
@@ -12,6 +14,9 @@ from .datatypes import logging_level
 from .datatypes import octal_type
 
 
+##
+
+
 @dc.dataclass(frozen=True)
 class ProcessConfig:
     name: str
@@ -108,3 +113,19 @@ class ServerConfig:
             child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
             **kwargs,
         )
+
+
+##
+
+
+def prepare_process_group_config(dct: ConfigMapping) -> ConfigMapping:
+    out = dict(dct)
+    out['processes'] = build_config_named_children(out.get('processes'))
+    return out
+
+
+def prepare_server_config(dct: ta.Mapping[str, ta.Any]) -> ta.Mapping[str, ta.Any]:
+    out = dict(dct)
+    group_dcts = build_config_named_children(out.get('groups'))
+    out['groups'] = [prepare_process_group_config(group_dct) for group_dct in group_dcts or []]
+    return out
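
A rough illustration of what these prepare hooks are for: they normalize the nested 'groups' and 'processes' entries of a raw config mapping (via build_config_named_children from ..configs) before it is unmarshalled into ServerConfig. The input shape below is hypothetical:

    raw = {
        'groups': {
            'web': {
                'processes': {
                    'app': {'command': '/usr/bin/app'},
                },
            },
        },
    }

    prepared = prepare_server_config(raw)
    # prepared['groups'] is now a list of group mappings, each with its
    # 'processes' likewise normalized into named child configs.
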
@@ -32,17 +32,24 @@ from .types import AbstractServerContext
 from .types import AbstractSubprocess
 
 
+ServerEpoch = ta.NewType('ServerEpoch', int)
+
+InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[int])
+
+
 class ServerContext(AbstractServerContext):
     def __init__(
             self,
             config: ServerConfig,
             *,
-            epoch: int = 0,
+            epoch: ServerEpoch = ServerEpoch(0),
+            inherited_fds: ta.Optional[InheritedFds] = None,
     ) -> None:
         super().__init__()
 
         self._config = config
         self._epoch = epoch
+        self._inherited_fds = InheritedFds(frozenset(inherited_fds or []))
 
         self._pid_history: ta.Dict[int, AbstractSubprocess] = {}
         self._state: SupervisorState = SupervisorStates.RUNNING
@@ -66,7 +73,7 @@ class ServerContext(AbstractServerContext):
         return self._config
 
     @property
-    def epoch(self) -> int:
+    def epoch(self) -> ServerEpoch:
         return self._epoch
 
     @property
@@ -96,6 +103,10 @@ class ServerContext(AbstractServerContext):
     def gid(self) -> ta.Optional[int]:
         return self._gid
 
+    @property
+    def inherited_fds(self) -> InheritedFds:
+        return self._inherited_fds
+
     ##
 
     def set_signals(self) -> None:
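
Used directly (outside the injector wiring introduced in main below), the new keyword stays optional; a sketch, assuming server_config is an already-built ServerConfig:

    ctx = ServerContext(
        server_config,
        epoch=ServerEpoch(1),
        inherited_fds=InheritedFds(frozenset([3, 4])),  # fds to leave open in children
    )
    assert 3 in ctx.inherited_fds
    assert ctx.epoch == 1
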
@@ -13,9 +13,9 @@ from .compat import find_prefix_at_end
 from .compat import readfd
 from .compat import strip_escapes
 from .configs import ProcessConfig
+from .events import EVENT_CALLBACKS
 from .events import ProcessLogStderrEvent
 from .events import ProcessLogStdoutEvent
-from .events import notify_event
 from .types import AbstractSubprocess
 
 
@@ -207,10 +207,10 @@ class OutputDispatcher(Dispatcher):
 
         if self._channel == 'stdout':
             if self._stdout_events_enabled:
-                notify_event(ProcessLogStdoutEvent(self._process, self._process.pid, data))
+                EVENT_CALLBACKS.notify(ProcessLogStdoutEvent(self._process, self._process.pid, data))
 
         elif self._stderr_events_enabled:
-            notify_event(ProcessLogStderrEvent(self._process, self._process.pid, data))
+            EVENT_CALLBACKS.notify(ProcessLogStderrEvent(self._process, self._process.pid, data))
 
     def record_output(self):
         if self._capture_log is None:
@@ -261,7 +261,7 @@ class OutputDispatcher(Dispatcher):
             channel = self._channel
             procname = self._process.config.name
             event = self.event_type(self._process, self._process.pid, data)
-            notify_event(event)
+            EVENT_CALLBACKS.notify(event)
 
             log.debug('%r %s emitted a comm event', procname, channel)
             for handler in self._capture_log.handlers:
@@ -29,12 +29,9 @@ class EventCallbacks:
 
 EVENT_CALLBACKS = EventCallbacks()
 
-notify_event = EVENT_CALLBACKS.notify
-clear_events = EVENT_CALLBACKS.clear
-
 
 class Event(abc.ABC):  # noqa
-    """Abstract event type """
+    """Abstract event type."""
 
 
 class ProcessLogEvent(Event, abc.ABC):
@@ -114,7 +111,7 @@ class RemoteCommunicationEvent(Event):
 
 
 class SupervisorStateChangeEvent(Event):
-    """ Abstract class """
+    """Abstract class."""
 
     def payload(self):
         return ''
@@ -136,7 +133,7 @@ class EventRejectedEvent: # purposely does not subclass Event
 
 
 class ProcessStateEvent(Event):
-    """ Abstract class, never raised directly """
+    """Abstract class, never raised directly."""
     frm = None
     to = None
 
@@ -225,7 +222,7 @@ class ProcessGroupRemovedEvent(ProcessGroupEvent):
 
 
 class TickEvent(Event):
-    """ Abstract """
+    """Abstract."""
 
     def __init__(self, when, supervisord):
         super().__init__()
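
Call sites elsewhere in this diff migrate from the removed module-level aliases to the EventCallbacks instance itself; notify and clear are exactly what those aliases bound:

    # before:  notify_event(event); clear_events()
    EVENT_CALLBACKS.notify(event)
    EVENT_CALLBACKS.clear()
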
@@ -1,19 +1,82 @@
 #!/usr/bin/env python3
 # ruff: noqa: UP006 UP007
 # @omlish-amalg ../scripts/supervisor.py
+import functools
 import itertools
-import json
+import os.path
 import typing as ta
 
+from omlish.lite.inject import Injector
+from omlish.lite.inject import InjectorBindingOrBindings
+from omlish.lite.inject import InjectorBindings
+from omlish.lite.inject import inj
 from omlish.lite.journald import journald_log_handler_factory
 from omlish.lite.logs import configure_standard_logging
-from omlish.lite.marshal import unmarshal_obj
 
+from ..configs import read_config_file
 from .compat import ExitNow
+from .compat import get_open_fds
+from .configs import ProcessConfig
+from .configs import ProcessGroupConfig
 from .configs import ServerConfig
+from .configs import prepare_server_config
+from .context import InheritedFds
 from .context import ServerContext
+from .context import ServerEpoch
+from .process import ProcessGroup
+from .process import Subprocess
+from .process import SubprocessFactory
 from .states import SupervisorStates
+from .supervisor import ProcessGroupFactory
 from .supervisor import Supervisor
+from .types import AbstractServerContext
+
+
+##
+
+
+def build_server_bindings(
+        config: ServerConfig,
+        *,
+        server_epoch: ta.Optional[ServerEpoch] = None,
+        inherited_fds: ta.Optional[InheritedFds] = None,
+) -> InjectorBindings:
+    lst: ta.List[InjectorBindingOrBindings] = [
+        inj.bind(config),
+
+        inj.bind(ServerContext, singleton=True),
+        inj.bind(AbstractServerContext, to_key=ServerContext),
+
+        inj.bind(Supervisor, singleton=True),
+    ]
+
+    #
+
+    def make_process_group_factory(injector: Injector) -> ProcessGroupFactory:
+        def inner(group_config: ProcessGroupConfig) -> ProcessGroup:
+            return injector.inject(functools.partial(ProcessGroup, group_config))
+        return ProcessGroupFactory(inner)
+    lst.append(inj.bind(make_process_group_factory))
+
+    def make_subprocess_factory(injector: Injector) -> SubprocessFactory:
+        def inner(process_config: ProcessConfig, group: ProcessGroup) -> Subprocess:
+            return injector.inject(functools.partial(Subprocess, process_config, group))
+        return SubprocessFactory(inner)
+    lst.append(inj.bind(make_subprocess_factory))
+
+    #
+
+    if server_epoch is not None:
+        lst.append(inj.bind(server_epoch, key=ServerEpoch))
+    if inherited_fds is not None:
+        lst.append(inj.bind(inherited_fds, key=InheritedFds))
+
+    #
+
+    return inj.as_bindings(*lst)
+
+
+##
 
 
 def main(
@@ -26,6 +89,7 @@ def main(
     parser = argparse.ArgumentParser()
     parser.add_argument('config_file', metavar='config-file')
     parser.add_argument('--no-journald', action='store_true')
+    parser.add_argument('--inherit-initial-fds', action='store_true')
     args = parser.parse_args(argv)
 
     #
@@ -41,20 +105,27 @@ def main(
 
     #
 
+    initial_fds: ta.Optional[InheritedFds] = None
+    if args.inherit_initial_fds:
+        initial_fds = InheritedFds(get_open_fds(0x10000))
+
     # if we hup, restart by making a new Supervisor()
     for epoch in itertools.count():
-        with open(cf) as f:
-            config_src = f.read()
-
-        config_dct = json.loads(config_src)
-        config: ServerConfig = unmarshal_obj(config_dct, ServerConfig)
+        config = read_config_file(
+            os.path.expanduser(cf),
+            ServerConfig,
+            prepare=prepare_server_config,
+        )
 
-        context = ServerContext(
+        injector = inj.create_injector(build_server_bindings(
             config,
-            epoch=epoch,
-        )
+            server_epoch=ServerEpoch(epoch),
+            inherited_fds=initial_fds,
+        ))
+
+        context = injector.provide(ServerContext)
+        supervisor = injector.provide(Supervisor)
 
-        supervisor = Supervisor(context)
         try:
             supervisor.main()
         except ExitNow:
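
The same bindings can be assembled outside main(), for example to obtain a fully wired Supervisor without the argument parsing or HUP loop; a sketch, assuming server_config is an already-loaded ServerConfig:

    injector = inj.create_injector(build_server_bindings(
        server_config,
        server_epoch=ServerEpoch(0),
        # inherited_fds is not bound here: build_server_bindings only binds it
        # when a value is supplied.
    ))
    supervisor = injector.provide(Supervisor)
    context = injector.provide(ServerContext)
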
@@ -1,7 +1,8 @@
 # ruff: noqa: UP006 UP007
+import dataclasses as dc
 import errno
 import functools
-import os
+import os.path
 import shlex
 import signal
 import time
@@ -30,6 +31,7 @@ from .datatypes import RestartUnconditionally
 from .dispatchers import Dispatcher
 from .dispatchers import InputDispatcher
 from .dispatchers import OutputDispatcher
+from .events import EVENT_CALLBACKS
 from .events import EventRejectedEvent
 from .events import ProcessCommunicationEvent
 from .events import ProcessCommunicationStderrEvent
@@ -43,7 +45,6 @@ from .events import ProcessStateStartingEvent
 from .events import ProcessStateStoppedEvent
 from .events import ProcessStateStoppingEvent
 from .events import ProcessStateUnknownEvent
-from .events import notify_event
 from .exceptions import BadCommandError
 from .exceptions import ProcessError
 from .states import STOPPED_STATES
@@ -55,6 +56,9 @@ from .types import AbstractServerContext
 from .types import AbstractSubprocess
 
 
+##
+
+
 @functools.total_ordering
 class Subprocess(AbstractSubprocess):
     """A class to manage a subprocess."""
@@ -80,7 +84,12 @@ class Subprocess(AbstractSubprocess):
     spawn_err = None  # error message attached by spawn() if any
     group = None  # ProcessGroup instance if process is in the group
 
-    def __init__(self, config: ProcessConfig, group: 'ProcessGroup', context: AbstractServerContext) -> None:
+    def __init__(
+            self,
+            config: ProcessConfig,
+            group: 'ProcessGroup',
+            context: AbstractServerContext,
+    ) -> None:
         super().__init__()
         self._config = config
         self.group = group
@@ -207,7 +216,7 @@ class Subprocess(AbstractSubprocess):
         event_class = self.event_map.get(new_state)
         if event_class is not None:
             event = event_class(self, old_state, expected)
-            notify_event(event)
+            EVENT_CALLBACKS.notify(event)
 
         return True
 
@@ -324,7 +333,10 @@ class Subprocess(AbstractSubprocess):
             os.dup2(self._pipes['child_stdout'], 2)
         else:
             os.dup2(self._pipes['child_stderr'], 2)
+
         for i in range(3, self.context.config.minfds):
+            if i in self.context.inherited_fds:
+                continue
             close_fd(i)
 
     def _spawn_as_child(self, filename: str, argv: ta.Sequence[str]) -> None:
@@ -359,7 +371,7 @@ class Subprocess(AbstractSubprocess):
         cwd = self.config.directory
         try:
             if cwd is not None:
-                os.chdir(cwd)
+                os.chdir(os.path.expanduser(cwd))
         except OSError as why:
             code = errno.errorcode.get(why.args[0], why.args[0])
             msg = f"couldn't chdir to {cwd}: {code}\n"
@@ -415,7 +427,7 @@ class Subprocess(AbstractSubprocess):
         return self.kill(self.config.stopsignal)
 
     def stop_report(self) -> None:
-        """ Log a 'waiting for x to stop' message with throttling. """
+        """Log a 'waiting for x to stop' message with throttling."""
         if self.state == ProcessStates.STOPPING:
             now = time.time()
 
@@ -545,7 +557,7 @@ class Subprocess(AbstractSubprocess):
         return None
 
     def finish(self, sts: int) -> None:
-        """ The process was reaped and we need to report and manage its state """
+        """The process was reaped and we need to report and manage its state."""
 
         self.drain()
 
@@ -626,7 +638,7 @@ class Subprocess(AbstractSubprocess):
         # system that this event was rejected so it can be processed again.
         if self.event is not None:
             # Note: this should only be true if we were in the BUSY state when finish() was called.
-            notify_event(EventRejectedEvent(self, self.event))  # type: ignore
+            EVENT_CALLBACKS.notify(EventRejectedEvent(self, self.event))  # type: ignore
         self.event = None
 
     def set_uid(self) -> ta.Optional[str]:
@@ -718,15 +730,39 @@ class Subprocess(AbstractSubprocess):
             pass
 
 
+##
+
+
+@dc.dataclass(frozen=True)
+class SubprocessFactory:
+    fn: ta.Callable[[ProcessConfig, 'ProcessGroup'], Subprocess]
+
+    def __call__(self, config: ProcessConfig, group: 'ProcessGroup') -> Subprocess:
+        return self.fn(config, group)
+
+
 @functools.total_ordering
 class ProcessGroup:
-    def __init__(self, config: ProcessGroupConfig, context: ServerContext):
+    def __init__(
+            self,
+            config: ProcessGroupConfig,
+            context: ServerContext,
+            *,
+            subprocess_factory: ta.Optional[SubprocessFactory] = None,
+    ):
         super().__init__()
         self.config = config
         self.context = context
+
+        if subprocess_factory is None:
+            def make_subprocess(config: ProcessConfig, group: ProcessGroup) -> Subprocess:
+                return Subprocess(config, group, self.context)
+            subprocess_factory = SubprocessFactory(make_subprocess)
+        self._subprocess_factory = subprocess_factory
+
         self.processes = {}
         for pconfig in self.config.processes or []:
-            process = Subprocess(pconfig, self, self.context)
+            process = self._subprocess_factory(pconfig, self)
             self.processes[pconfig.name] = process
 
     def __lt__(self, other):
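
The factory indirection is what lets the injector bindings above construct Subprocess instances, but it can also be supplied by hand; a minimal sketch, assuming group_config and context are already built:

    def make_subprocess(config: ProcessConfig, group: ProcessGroup) -> Subprocess:
        # mirror of the default factory; extra wiring could go here
        return Subprocess(config, group, context)

    group = ProcessGroup(
        group_config,
        context,
        subprocess_factory=SubprocessFactory(make_subprocess),
    )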