ominfra 0.0.0.dev128__py3-none-any.whl → 0.0.0.dev129__py3-none-any.whl
- ominfra/deploy/_executor.py +24 -0
- ominfra/pyremote/_runcommands.py +24 -0
- ominfra/scripts/journald2aws.py +24 -0
- ominfra/scripts/supervisor.py +543 -440
- ominfra/supervisor/dispatchers.py +3 -3
- ominfra/supervisor/dispatchersimpl.py +17 -11
- ominfra/supervisor/groupsimpl.py +2 -2
- ominfra/supervisor/inject.py +17 -12
- ominfra/supervisor/io.py +82 -0
- ominfra/supervisor/main.py +5 -6
- ominfra/supervisor/processimpl.py +7 -14
- ominfra/supervisor/signals.py +66 -0
- ominfra/supervisor/spawningimpl.py +9 -9
- ominfra/supervisor/supervisor.py +65 -135
- ominfra/supervisor/types.py +38 -25
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/RECORD +21 -20
- ominfra/supervisor/context.py +0 -80
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/top_level.txt +0 -0
ominfra/supervisor/supervisor.py
CHANGED
@@ -1,40 +1,38 @@
 # ruff: noqa: UP006 UP007
-import
+import errno
+import os
 import time
 import typing as ta
 
 from omlish.lite.check import check_isinstance
-from omlish.lite.check import check_not_none
 from omlish.lite.logs import log
 from omlish.lite.typing import Func1
 
 from .configs import ProcessGroupConfig
-from .
-from .dispatchers import Dispatchers
+from .configs import ServerConfig
 from .events import TICK_EVENTS
 from .events import EventCallbacks
 from .events import SupervisorRunningEvent
 from .events import SupervisorStoppingEvent
 from .groups import ProcessGroup
 from .groups import ProcessGroupManager
+from .io import IoManager
 from .poller import Poller
 from .process import PidHistory
 from .setup import SupervisorSetup
+from .signals import SignalHandler
 from .states import SupervisorState
-from .types import
+from .types import ExitNow
 from .types import Process
+from .types import SupervisorStateManager
 from .utils.os import decode_wait_status
-from .utils.
-from .utils.
+from .utils.ostypes import Pid
+from .utils.ostypes import Rc
 
 
 ##
 
 
-class ExitNow(Exception):  # noqa
-    pass
-
-
 def timeslice(period: int, when: float) -> int:
     return int(when - (when % period))
 
@@ -42,59 +40,18 @@ def timeslice(period: int, when: float) -> int:
 ##
 
 
-class
-    def __init__(
-            self,
-            *,
-            context: ServerContextImpl,
-            signal_receiver: SignalReceiver,
-            process_groups: ProcessGroupManager,
-    ) -> None:
+class SupervisorStateManagerImpl(SupervisorStateManager):
+    def __init__(self) -> None:
         super().__init__()
 
-        self.
-        self._signal_receiver = signal_receiver
-        self._process_groups = process_groups
-
-    def set_signals(self) -> None:
-        self._signal_receiver.install(
-            signal.SIGTERM,
-            signal.SIGINT,
-            signal.SIGQUIT,
-            signal.SIGHUP,
-            signal.SIGCHLD,
-            signal.SIGUSR2,
-        )
-
-    def handle_signals(self) -> None:
-        sig = self._signal_receiver.get_signal()
-        if not sig:
-            return
-
-        if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
-            log.warning('received %s indicating exit request', sig_name(sig))
-            self._context.set_state(SupervisorState.SHUTDOWN)
-
-        elif sig == signal.SIGHUP:
-            if self._context.state == SupervisorState.SHUTDOWN:
-                log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig))  # noqa
-            else:
-                log.warning('received %s indicating restart request', sig_name(sig))  # noqa
-                self._context.set_state(SupervisorState.RESTARTING)
+        self._state: SupervisorState = SupervisorState.RUNNING
 
-
-
-
-        elif sig == signal.SIGUSR2:
-            log.info('received %s indicating log reopen request', sig_name(sig))
-
-            for p in self._process_groups.all_processes():
-                for d in p.get_dispatchers():
-                    if isinstance(d, OutputDispatcher):
-                        d.reopen_logs()
+    @property
+    def state(self) -> SupervisorState:
+        return self._state
 
-
-
+    def set_state(self, state: SupervisorState) -> None:
+        self._state = state
 
 
 ##
@@ -108,7 +65,7 @@ class Supervisor:
     def __init__(
             self,
             *,
-
+            config: ServerConfig,
             poller: Poller,
             process_groups: ProcessGroupManager,
             signal_handler: SignalHandler,
@@ -116,10 +73,12 @@ class Supervisor:
             process_group_factory: ProcessGroupFactory,
             pid_history: PidHistory,
             setup: SupervisorSetup,
+            states: SupervisorStateManager,
+            io: IoManager,
     ) -> None:
         super().__init__()
 
-        self.
+        self._config = config
         self._poller = poller
         self._process_groups = process_groups
         self._signal_handler = signal_handler
@@ -127,6 +86,8 @@ class Supervisor:
         self._process_group_factory = process_group_factory
         self._pid_history = pid_history
         self._setup = setup
+        self._states = states
+        self._io = io
 
         self._ticks: ta.Dict[int, float] = {}
         self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
@@ -136,11 +97,8 @@ class Supervisor:
     #
 
     @property
-    def
-        return self.
-
-    def get_state(self) -> SupervisorState:
-        return self._context.state
+    def state(self) -> SupervisorState:
+        return self._states.state
 
     #
 
@@ -181,7 +139,7 @@ class Supervisor:
             log.info('waiting for %s to die', namestr)
             self._last_shutdown_report = now
             for proc in unstopped:
-                log.debug('%s state: %s', proc.config.name, proc.
+                log.debug('%s state: %s', proc.config.name, proc.state.name)
 
         return unstopped
 
@@ -205,7 +163,7 @@ class Supervisor:
         self._event_callbacks.clear()
 
         try:
-            for config in self.
+            for config in self._config.groups or []:
                 self.add_process_group(config)
 
             self._signal_handler.set_signals()
@@ -229,7 +187,7 @@ class Supervisor:
             self._signal_handler.handle_signals()
             self._tick()
 
-            if self.
+            if self._states.state < SupervisorState.RUNNING:
                 self._ordered_stop_groups_phase_2()
 
     def _ordered_stop_groups_phase_1(self) -> None:
@@ -248,20 +206,11 @@ class Supervisor:
                 # down, so push it back on to the end of the stop group queue
                 self._stop_groups.append(group)
 
-    def get_dispatchers(self) -> Dispatchers:
-        return Dispatchers(
-            d
-            for p in self._process_groups.all_processes()
-            for d in p.get_dispatchers()
-        )
-
     def _poll(self) -> None:
-        dispatchers = self.get_dispatchers()
-
         sorted_groups = list(self._process_groups)
         sorted_groups.sort()
 
-        if self.
+        if self._states.state < SupervisorState.RUNNING:
             if not self._stopping:
                 # first time, set the stopping flag, do a notification and set stop_groups
                 self._stopping = True
@@ -274,54 +223,7 @@ class Supervisor:
                 # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
                 raise ExitNow
 
-
-            if dispatcher.readable():
-                self._poller.register_readable(fd)
-            if dispatcher.writable():
-                self._poller.register_writable(fd)
-
-        timeout = 1  # this cannot be fewer than the smallest TickEvent (5)
-        r, w = self._poller.poll(timeout)
-
-        for fd in r:
-            if fd in dispatchers:
-                try:
-                    dispatcher = dispatchers[fd]
-                    log.debug('read event caused by %r', dispatcher)
-                    dispatcher.handle_read_event()
-                    if not dispatcher.readable():
-                        self._poller.unregister_readable(fd)
-                except ExitNow:
-                    raise
-                except Exception:  # noqa
-                    dispatchers[fd].handle_error()
-            else:
-                # if the fd is not in combined map, we should unregister it. otherwise, it will be polled every
-                # time, which may cause 100% cpu usage
-                log.debug('unexpected read event from fd %r', fd)
-                try:
-                    self._poller.unregister_readable(fd)
-                except Exception:  # noqa
-                    pass
-
-        for fd in w:
-            if fd in dispatchers:
-                try:
-                    dispatcher = dispatchers[fd]
-                    log.debug('write event caused by %r', dispatcher)
-                    dispatcher.handle_write_event()
-                    if not dispatcher.writable():
-                        self._poller.unregister_writable(fd)
-                except ExitNow:
-                    raise
-                except Exception:  # noqa
-                    dispatchers[fd].handle_error()
-            else:
-                log.debug('unexpected write event from fd %r', fd)
-                try:
-                    self._poller.unregister_writable(fd)
-                except Exception:  # noqa
-                    pass
+        self._io.poll()
 
         for group in sorted_groups:
             for process in group:
@@ -331,17 +233,17 @@ class Supervisor:
         if depth >= 100:
             return
 
-
-        if not pid:
+        wp = waitpid()
+        if wp is None or not wp.pid:
             return
 
-        process = self._pid_history.get(pid, None)
+        process = self._pid_history.get(wp.pid, None)
         if process is None:
-            _, msg = decode_wait_status(
-            log.info('reaped unknown pid %s (%s)', pid, msg)
+            _, msg = decode_wait_status(wp.sts)
+            log.info('reaped unknown pid %s (%s)', wp.pid, msg)
         else:
-            process.finish(
-            del self._pid_history[pid]
+            process.finish(wp.sts)
+            del self._pid_history[wp.pid]
 
         if not once:
             # keep reaping until no more kids to reap, but don't recurse infinitely
@@ -366,3 +268,31 @@ class Supervisor:
             if this_tick != last_tick:
                 self._ticks[period] = this_tick
                 self._event_callbacks.notify(event(this_tick, self))
+
+
+##
+
+
+class WaitedPid(ta.NamedTuple):
+    pid: Pid
+    sts: Rc
+
+
+def waitpid() -> ta.Optional[WaitedPid]:
+    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
+    # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
+    # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
+    # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
+    # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
+    # lying around.
+    try:
+        pid, sts = os.waitpid(-1, os.WNOHANG)
+    except OSError as exc:
+        code = exc.args[0]
+        if code not in (errno.ECHILD, errno.EINTR):
+            log.critical('waitpid error %r; a process may not be cleaned up properly', code)
+        if code == errno.EINTR:
+            log.debug('EINTR during reap')
+        return None
+    else:
+        return WaitedPid(pid, sts)  # type: ignore
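The reaping change above replaces the old tuple-returning ServerContextImpl.waitpid() with a module-level waitpid() that returns an optional WaitedPid named tuple. Below is a minimal, self-contained sketch of the new call pattern; the reap_once() function and the plain dict/int types are illustrative stand-ins, not names from the package (the real code uses PidHistory, Pid, Rc and the omlish logger).

import errno
import os
import typing as ta


class WaitedPid(ta.NamedTuple):
    pid: int  # Pid in the real code
    sts: int  # Rc in the real code


def waitpid() -> ta.Optional[WaitedPid]:
    # Non-blocking reap of any child; None tells the caller there is nothing to do.
    try:
        pid, sts = os.waitpid(-1, os.WNOHANG)
    except OSError as exc:
        code = exc.args[0]
        if code not in (errno.ECHILD, errno.EINTR):
            print(f'waitpid error {code!r}; a process may not be cleaned up properly')
        return None
    else:
        return WaitedPid(pid, sts)


def reap_once(pid_history: ta.Dict[int, ta.Any]) -> None:
    # Mirrors the reaping loop body after the change: one optional value is
    # checked instead of the old (Optional[Pid], Optional[Rc]) tuple.
    wp = waitpid()
    if wp is None or not wp.pid:
        return
    process = pid_history.get(wp.pid)
    if process is None:
        print(f'reaped unknown pid {wp.pid} (status {wp.sts})')
    else:
        process.finish(wp.sts)
        del pid_history[wp.pid]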
ominfra/supervisor/types.py
CHANGED
@@ -5,7 +5,6 @@ import typing as ta
 
 from .configs import ProcessConfig
 from .configs import ProcessGroupConfig
-from .configs import ServerConfig
 from .states import ProcessState
 from .states import SupervisorState
 from .utils.collections import KeyedCollectionAccessors
@@ -21,6 +20,10 @@ if ta.TYPE_CHECKING:
 ##
 
 
+class ExitNow(Exception):  # noqa
+    pass
+
+
 ServerEpoch = ta.NewType('ServerEpoch', int)
 
 
@@ -44,12 +47,7 @@ class ConfigPriorityOrdered(abc.ABC):
 ##
 
 
-class
-    @property
-    @abc.abstractmethod
-    def config(self) -> ServerConfig:
-        raise NotImplementedError
-
+class SupervisorStateManager(abc.ABC):
     @property
     @abc.abstractmethod
     def state(self) -> SupervisorState:
@@ -64,11 +62,6 @@ class ServerContext(abc.ABC):
 
 
 class Dispatcher(abc.ABC):
-    @property
-    @abc.abstractmethod
-    def process(self) -> 'Process':
-        raise NotImplementedError
-
     @property
     @abc.abstractmethod
     def channel(self) -> str:
@@ -112,8 +105,32 @@ class Dispatcher(abc.ABC):
     def handle_write_event(self) -> None:
         raise TypeError
 
+    #
+
+    def handle_connect(self) -> None:
+        raise TypeError
+
+    def handle_close(self) -> None:
+        raise TypeError
+
+    def handle_accepted(self, sock, addr) -> None:
+        raise TypeError
+
+
+class HasDispatchers(abc.ABC):
+    @abc.abstractmethod
+    def get_dispatchers(self) -> 'Dispatchers':
+        raise NotImplementedError
+
 
-class
+class ProcessDispatcher(Dispatcher, abc.ABC):
+    @property
+    @abc.abstractmethod
+    def process(self) -> 'Process':
+        raise NotImplementedError
+
+
+class ProcessOutputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
     def remove_logs(self) -> None:
         raise NotImplementedError
@@ -123,7 +140,7 @@ class OutputDispatcher(Dispatcher, abc.ABC):
         raise NotImplementedError
 
 
-class
+class ProcessInputDispatcher(ProcessDispatcher, abc.ABC):
    @abc.abstractmethod
    def write(self, chars: ta.Union[bytes, str]) -> None:
        raise NotImplementedError
@@ -136,7 +153,11 @@ class InputDispatcher(Dispatcher, abc.ABC):
 ##
 
 
-class Process(
+class Process(
+        ConfigPriorityOrdered,
+        HasDispatchers,
+        abc.ABC,
+):
     @property
     @abc.abstractmethod
     def name(self) -> str:
@@ -159,11 +180,6 @@ class Process(ConfigPriorityOrdered, abc.ABC):
 
     #
 
-    @property
-    @abc.abstractmethod
-    def context(self) -> ServerContext:
-        raise NotImplementedError
-
     @abc.abstractmethod
     def finish(self, sts: Rc) -> None:
         raise NotImplementedError
@@ -180,18 +196,15 @@ class Process(ConfigPriorityOrdered, abc.ABC):
     def transition(self) -> None:
         raise NotImplementedError
 
+    @property
     @abc.abstractmethod
-    def
+    def state(self) -> ProcessState:
         raise NotImplementedError
 
     @abc.abstractmethod
     def after_setuid(self) -> None:
         raise NotImplementedError
 
-    @abc.abstractmethod
-    def get_dispatchers(self) -> 'Dispatchers':
-        raise NotImplementedError
-
 
 ##
 
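The types.py changes reshape the dispatcher abstractions: Dispatcher loses its process property and gains optional handle_connect/handle_close/handle_accepted hooks, ProcessDispatcher reintroduces the process property for the process-bound ProcessOutputDispatcher/ProcessInputDispatcher pair, and HasDispatchers captures "owns a set of dispatchers" so Process can inherit it. A rough sketch of how a concrete implementation would slot into the new hierarchy follows; the class names EchoOutputDispatcher and FakeProcess are hypothetical and not taken from dispatchersimpl.py, and the ABCs are trimmed to the parts relevant here.

import abc
import typing as ta


class Dispatcher(abc.ABC):
    @abc.abstractmethod
    def readable(self) -> bool:
        raise NotImplementedError

    def handle_read_event(self) -> None:
        raise TypeError

    # New optional hooks raise TypeError unless a subclass opts in.
    def handle_connect(self) -> None:
        raise TypeError


class HasDispatchers(abc.ABC):
    @abc.abstractmethod
    def get_dispatchers(self) -> ta.Sequence[Dispatcher]:
        raise NotImplementedError


class ProcessDispatcher(Dispatcher, abc.ABC):
    # Only dispatchers bound to a child process carry the process property now.
    @property
    @abc.abstractmethod
    def process(self) -> 'FakeProcess':
        raise NotImplementedError


class EchoOutputDispatcher(ProcessDispatcher):
    # Hypothetical concrete dispatcher: bound to a process, readable only.
    def __init__(self, process: 'FakeProcess') -> None:
        self._process = process

    @property
    def process(self) -> 'FakeProcess':
        return self._process

    def readable(self) -> bool:
        return True

    def handle_read_event(self) -> None:
        print('would read output of', self._process.name)


class FakeProcess(HasDispatchers):
    # Hypothetical stand-in for Process, which now "has dispatchers" via the ABC.
    def __init__(self, name: str) -> None:
        self.name = name
        self._dispatchers = [EchoOutputDispatcher(self)]

    def get_dispatchers(self) -> ta.Sequence[Dispatcher]:
        return self._dispatchers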
{ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ominfra
-Version: 0.0.0.
+Version: 0.0.0.dev129
 Summary: ominfra
 Author: wrmsr
 License: BSD-3-Clause
@@ -12,8 +12,8 @@ Classifier: Operating System :: OS Independent
 Classifier: Operating System :: POSIX
 Requires-Python: >=3.12
 License-File: LICENSE
-Requires-Dist: omdev==0.0.0.
-Requires-Dist: omlish==0.0.0.
+Requires-Dist: omdev==0.0.0.dev129
+Requires-Dist: omlish==0.0.0.dev129
 Provides-Extra: all
 Requires-Dist: paramiko~=3.5; extra == "all"
 Requires-Dist: asyncssh~=2.18; extra == "all"
{ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/RECORD
CHANGED
@@ -22,7 +22,7 @@ ominfra/clouds/aws/journald2aws/poster.py,sha256=hz1XuctW8GtLmfjhRvCFY6py52D4BzX
 ominfra/clouds/gcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/clouds/gcp/auth.py,sha256=3PyfRJNgajjMqJFem3SKui0CqGeHEsZlvbRhuxFcZG8,1348
 ominfra/deploy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/deploy/_executor.py,sha256=
+ominfra/deploy/_executor.py,sha256=1DQxeHJjxtRGxdFO7PiPhzxwiJlm8K2BelJfLXb_OxU,35079
 ominfra/deploy/configs.py,sha256=qi0kwT7G2NH7dXLOQic-u6R3yeadup_QtvrjwWIggbM,435
 ominfra/deploy/remote.py,sha256=6ACmpXU1uBdyGs3Xsp97ktKFq30cJlzN9LRWNUWlGY4,2144
 ominfra/deploy/executor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
@@ -56,37 +56,38 @@ ominfra/journald/tailer.py,sha256=5abcFMfgi7fnY9ZEQe2ZVobaJxjQkeu6d9Kagw33a1w,33
 ominfra/manage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/manage/manage.py,sha256=BttL8LFEknHZE_h2Pt5dAqbfUkv6qy43WI0raXBZ1a8,151
 ominfra/pyremote/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/pyremote/_runcommands.py,sha256=
+ominfra/pyremote/_runcommands.py,sha256=bzN-253qyoKwS-5OIqPmfhxVJdt4HtrnspeUbpKKPWM,28915
 ominfra/pyremote/bootstrap.py,sha256=RvMO3YGaN1E4sgUi1JEtiPak8cjvqtc_vRCq1yqbeZg,3370
 ominfra/pyremote/runcommands.py,sha256=bviS0_TDIoZVAe4h-_iavbvJtVSFu8lnk7fQ5iasCWE,1571
 ominfra/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/scripts/journald2aws.py,sha256=
-ominfra/scripts/supervisor.py,sha256=
+ominfra/scripts/journald2aws.py,sha256=1fqTSLiZE3BCCxdyj5QMCWGHN4leS5INORaDuKyRm_A,128964
+ominfra/scripts/supervisor.py,sha256=y84cLJj98qvyHiwWf0bDly2Rwexvc9QaT0qrQx06lGc,224746
 ominfra/supervisor/LICENSE.txt,sha256=yvqaMNsDhWxziHa9ien6qCW1SkZv-DQlAg96XjfSee8,1746
 ominfra/supervisor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
 ominfra/supervisor/__main__.py,sha256=I0yFw-C08OOiZ3BF6lF1Oiv789EQXu-_j6whDhQUTEA,66
 ominfra/supervisor/configs.py,sha256=AhBlbifwDXc0acEhcbdv9jphJL-SBFODFDAWDVckzAE,3945
-ominfra/supervisor/
-ominfra/supervisor/
-ominfra/supervisor/dispatchersimpl.py,sha256=t1VFcofj1kTH1q13Z-S1OUTXixPwgSqJkh5A5IkHKPA,10956
+ominfra/supervisor/dispatchers.py,sha256=8yReui-dVPfvqYrHLsCU7M0siluOs1p5Up7Ag4nMAeI,991
+ominfra/supervisor/dispatchersimpl.py,sha256=EZYxK_RbOkjMv4N3pjH6-_fBhXDmcxFhEyZEgEAbtUk,11294
 ominfra/supervisor/events.py,sha256=w3HQFrq-SuroYWoQfNFYeU1phnTvHTgsAqA6TGtAafI,6593
 ominfra/supervisor/exceptions.py,sha256=Qbu211H3CLlSmi9LsSikOwrcL5HgJP9ugvcKWlGTAoI,750
 ominfra/supervisor/groups.py,sha256=g5Zp_lkVhn1FSe6GSEPbaELincG5a46ctv1xpB-WmnQ,2163
-ominfra/supervisor/groupsimpl.py,sha256=
-ominfra/supervisor/inject.py,sha256=
-ominfra/supervisor/
+ominfra/supervisor/groupsimpl.py,sha256=nIrW4SmB0W6c2jOR_HhkfVcH4eHyLZnG1FJ0MCzc6mQ,2292
+ominfra/supervisor/inject.py,sha256=J1XGUkgCuaD-MPASso-2sbTSoVuDDIhcGbazQLVjajY,3217
+ominfra/supervisor/io.py,sha256=iYYqQO0Hu1FIYzYXdSGHEIHYW-tGm4SCWky_ZTHaKWY,2727
+ominfra/supervisor/main.py,sha256=ebe7skFPfwXV2meMVRndhuLZmz-LiuHH1x1CgiarR0o,4132
 ominfra/supervisor/pipes.py,sha256=XrJ9lD04tPdzZD3xhhYKxpBKHWhZ0Ii315E78bgj7ws,2233
 ominfra/supervisor/poller.py,sha256=LnQVttPCm8a1UtnDvsho6zLw8NP-2_2VUiNM-d0w_FU,7776
 ominfra/supervisor/privileges.py,sha256=bO7rJGT7cMOBALK_4D4NiQnOS5dOYb14Sz66R-ymG24,2071
 ominfra/supervisor/process.py,sha256=UaubVxsxVqDnbuWVpTH0DTGbJGLO0vGJ9mNcvy2kCXM,217
-ominfra/supervisor/processimpl.py,sha256=
+ominfra/supervisor/processimpl.py,sha256=bFhf9RqAB_yNlMM5dM1_4m964T9m3vKThEv3n7OYQN4,18684
 ominfra/supervisor/setup.py,sha256=7HwwwI-WT_Z0WjZ9_l5Orr4K298nKKhQ1f_ZgGsi9TU,622
 ominfra/supervisor/setupimpl.py,sha256=S_YgCH3XzLsFIAriJROvDMUDh7OzVVJoxzEzCkbb4g4,9648
+ominfra/supervisor/signals.py,sha256=jY52naUifcAjd6nICTP1ZW3IQSPsHB4cvbsJo8_QV_U,2196
 ominfra/supervisor/spawning.py,sha256=i1k3tmqWyU-KIN7kel-JVxTVGnLiTIVmZzlstJSZpjM,622
-ominfra/supervisor/spawningimpl.py,sha256=
+ominfra/supervisor/spawningimpl.py,sha256=E702F10TexP5-V6i97U6ysiBcTKyLSPzQoJZtedUjQs,11140
 ominfra/supervisor/states.py,sha256=9yoNOSwalRcKEnCP9zG6tVS0oivo5tCeuH6AaaW7Jpc,890
-ominfra/supervisor/supervisor.py,sha256=
-ominfra/supervisor/types.py,sha256=
+ominfra/supervisor/supervisor.py,sha256=bLl4cHu6G4Kr_nDAr6Q6jG6g5oRWIx0rQaPHGVG0WMQ,9359
+ominfra/supervisor/types.py,sha256=TZYvdX_3XQ82PgpOOJF2PT5VMlOD9zqMVZhB-ZofTRI,4891
 ominfra/supervisor/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/supervisor/utils/collections.py,sha256=vcfmVYS4QngMdtEI1DvdRIcubmy55Wj40NCzW27_rIY,1361
 ominfra/supervisor/utils/diag.py,sha256=ujz4gkW7p3wmbaKFM8Hz5eHEwpoUkbB8JeDvcHilCz0,705
@@ -102,9 +103,9 @@ ominfra/tailscale/api.py,sha256=C5-t_b6jZXUWcy5k8bXm7CFnk73pSdrlMOgGDeGVrpw,1370
 ominfra/tailscale/cli.py,sha256=DSGp4hn5xwOW-l_u_InKlSF6kIobxtUtVssf_73STs0,3567
 ominfra/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/tools/listresources.py,sha256=4qVg5txsb10EHhvqXXeM6gJ2jx9LbroEnPydDv1uXs0,6176
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
+ominfra-0.0.0.dev129.dist-info/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
+ominfra-0.0.0.dev129.dist-info/METADATA,sha256=VsF9d4SW55_llwPjsmpCXtrLrw5L0r86L5DndQ7YOwo,731
+ominfra-0.0.0.dev129.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ominfra-0.0.0.dev129.dist-info/entry_points.txt,sha256=kgecQ2MgGrM9qK744BoKS3tMesaC3yjLnl9pa5CRczg,37
+ominfra-0.0.0.dev129.dist-info/top_level.txt,sha256=E-b2OHkk_AOBLXHYZQ2EOFKl-_6uOGd8EjeG-Zy6h_w,8
+ominfra-0.0.0.dev129.dist-info/RECORD,,
ominfra/supervisor/context.py
DELETED
@@ -1,80 +0,0 @@
-# ruff: noqa: UP006 UP007
-import errno
-import os
-import typing as ta
-
-from omlish.lite.logs import log
-
-from .configs import ServerConfig
-from .poller import Poller
-from .states import SupervisorState
-from .types import ServerContext
-from .types import ServerEpoch
-from .utils.fs import mktempfile
-from .utils.ostypes import Pid
-from .utils.ostypes import Rc
-
-
-class ServerContextImpl(ServerContext):
-    def __init__(
-            self,
-            config: ServerConfig,
-            poller: Poller,
-            *,
-            epoch: ServerEpoch = ServerEpoch(0),
-    ) -> None:
-        super().__init__()
-
-        self._config = config
-        self._poller = poller
-        self._epoch = epoch
-
-        self._state: SupervisorState = SupervisorState.RUNNING
-
-    @property
-    def config(self) -> ServerConfig:
-        return self._config
-
-    @property
-    def epoch(self) -> ServerEpoch:
-        return self._epoch
-
-    @property
-    def first(self) -> bool:
-        return not self._epoch
-
-    @property
-    def state(self) -> SupervisorState:
-        return self._state
-
-    def set_state(self, state: SupervisorState) -> None:
-        self._state = state
-
-    #
-
-    def waitpid(self) -> ta.Tuple[ta.Optional[Pid], ta.Optional[Rc]]:
-        # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
-        # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
-        # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
-        # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
-        # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
-        # lying around.
-        try:
-            pid, sts = os.waitpid(-1, os.WNOHANG)
-        except OSError as exc:
-            code = exc.args[0]
-            if code not in (errno.ECHILD, errno.EINTR):
-                log.critical('waitpid error %r; a process may not be cleaned up properly', code)
-            if code == errno.EINTR:
-                log.debug('EINTR during reap')
-            pid, sts = None, None
-        return pid, sts  # type: ignore
-
-    def get_auto_child_log_name(self, name: str, identifier: str, channel: str) -> str:
-        prefix = f'{name}-{channel}---{identifier}-'
-        logfile = mktempfile(
-            suffix='.log',
-            prefix=prefix,
-            dir=self.config.child_logdir,
-        )
-        return logfile
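With context.py deleted, ServerContextImpl's responsibilities are redistributed by the diffs above: the config is injected into Supervisor directly as a ServerConfig, the running/shutdown/restarting state moves behind the new SupervisorStateManager ABC (implemented by SupervisorStateManagerImpl in supervisor.py), and child reaping becomes the module-level waitpid() helper. A small sketch of the state-manager interaction the main loop now relies on; the SupervisorState values below are assumed stand-ins for .states.SupervisorState, and the signal side lives in the new signals.py, which this diff does not show.

import enum


class SupervisorState(enum.IntEnum):
    # Assumed ordering: anything below RUNNING means the main loop should begin
    # its ordered shutdown, matching the `state < RUNNING` checks in the diff.
    SHUTDOWN = -1
    RESTARTING = 0
    RUNNING = 1


class SupervisorStateManagerImpl:
    def __init__(self) -> None:
        self._state: SupervisorState = SupervisorState.RUNNING

    @property
    def state(self) -> SupervisorState:
        return self._state

    def set_state(self, state: SupervisorState) -> None:
        self._state = state


states = SupervisorStateManagerImpl()

# What the signal handler would do on an exit request (e.g. SIGTERM):
states.set_state(SupervisorState.SHUTDOWN)

# The check the run/poll loops now make instead of consulting ServerContextImpl:
assert states.state < SupervisorState.RUNNING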
{ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/LICENSE
File without changes
{ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/WHEEL
File without changes
{ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/entry_points.txt
File without changes
{ominfra-0.0.0.dev128.dist-info → ominfra-0.0.0.dev129.dist-info}/top_level.txt
File without changes