ominfra-0.0.0.dev118-py3-none-any.whl → ominfra-0.0.0.dev120-py3-none-any.whl
- ominfra/deploy/_executor.py +6 -2
- ominfra/deploy/poly/_main.py +6 -2
- ominfra/journald/fields.py +187 -0
- ominfra/journald/tailer.py +375 -312
- ominfra/pyremote/_runcommands.py +6 -2
- ominfra/scripts/journald2aws.py +381 -314
- ominfra/scripts/supervisor.py +697 -464
- ominfra/supervisor/__main__.py +1 -1
- ominfra/supervisor/context.py +50 -25
- ominfra/supervisor/dispatchers.py +12 -13
- ominfra/supervisor/events.py +4 -7
- ominfra/supervisor/main.py +68 -0
- ominfra/supervisor/poller.py +1 -3
- ominfra/supervisor/process.py +10 -11
- ominfra/supervisor/supervisor.py +161 -193
- {ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/RECORD +21 -19
- {ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/top_level.txt +0 -0
ominfra/supervisor/supervisor.py
CHANGED
@@ -1,31 +1,25 @@
-#!/usr/bin/env python3
 # ruff: noqa: UP006 UP007
-# @omlish-amalg ../scripts/supervisor.py
-import json
-import logging
 import signal
 import time
 import typing as ta
 
+from omlish.lite.cached import cached_nullary
 from omlish.lite.check import check_not_none
-from omlish.lite.logs import
-from omlish.lite.marshal import unmarshal_obj
+from omlish.lite.logs import log
 
 from .compat import ExitNow
 from .compat import as_string
 from .compat import decode_wait_status
 from .compat import signame
 from .configs import ProcessGroupConfig
-from .configs import ServerConfig
 from .context import ServerContext
 from .dispatchers import Dispatcher
+from .events import EVENT_CALLBACKS
 from .events import TICK_EVENTS
 from .events import ProcessGroupAddedEvent
 from .events import ProcessGroupRemovedEvent
 from .events import SupervisorRunningEvent
 from .events import SupervisorStoppingEvent
-from .events import clear_events
-from .events import notify_event
 from .process import ProcessGroup
 from .process import Subprocess
 from .states import SupervisorState
@@ -33,7 +27,8 @@ from .states import SupervisorStates
 from .states import get_process_state_description
 
 
-
+def timeslice(period: int, when: float) -> int:
+    return int(when - (when % period))
 
 
 class Supervisor:
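Note: the new module-level timeslice() helper buckets a timestamp into period-sized windows; _tick() (in the final hunk of this file) fires a tick event exactly when that bucket changes between polls. A quick standalone check of the rollover behavior, using a 5-second period:

    def timeslice(period: int, when: float) -> int:
        return int(when - (when % period))

    assert timeslice(5, 100.0) == 100  # exactly on a boundary
    assert timeslice(5, 103.9) == 100  # same 5s window as 100.0, no tick yet
    assert timeslice(5, 105.1) == 105  # next window, tick fires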
@@ -48,6 +43,8 @@ class Supervisor:
         self._stopping = False  # set after we detect that we are handling a stop request
         self._last_shutdown_report = 0.  # throttle for delayed process error reports at stop
 
+    #
+
     @property
     def context(self) -> ServerContext:
         return self._context
@@ -55,47 +52,14 @@ class Supervisor:
     def get_state(self) -> SupervisorState:
         return self._context.state
 
-
-        if not self._context.first:
-            # prevent crash on libdispatch-based systems, at least for the first request
-            self._context.cleanup_fds()
-
-        self._context.set_uid_or_exit()
+    #
 
-
-
-
-
-        if not self._context.config.nocleanup:
-            # clean up old automatic logs
-            self._context.clear_auto_child_logdir()
-
-        self.run()
-
-    def run(self) -> None:
-        self._process_groups = {}  # clear
-        self._stop_groups = None  # clear
-
-        clear_events()
-
-        try:
-            for config in self._context.config.groups or []:
-                self.add_process_group(config)
-
-            self._context.set_signals()
-
-            if not self._context.config.nodaemon and self._context.first:
-                self._context.daemonize()
-
-            # writing pid file needs to come *after* daemonizing or pid will be wrong
-            self._context.write_pidfile()
+    class DiffToActive(ta.NamedTuple):
+        added: ta.List[ProcessGroupConfig]
+        changed: ta.List[ProcessGroupConfig]
+        removed: ta.List[ProcessGroupConfig]
 
-
-
-        finally:
-            self._context.cleanup()
-
-    def diff_to_active(self):
+    def diff_to_active(self) -> DiffToActive:
         new = self._context.config.groups or []
         cur = [group.config for group in self._process_groups.values()]
 
@@ -107,7 +71,7 @@ class Supervisor:
 
         changed = [cand for cand in new if cand != curdict.get(cand.name, cand)]
 
-        return added, changed, removed
+        return Supervisor.DiffToActive(added, changed, removed)
 
     def add_process_group(self, config: ProcessGroupConfig) -> bool:
         name = config.name
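Note: diff_to_active() now returns the typed DiffToActive NamedTuple instead of a bare 3-tuple, so call sites get named fields while positional unpacking keeps working. A hypothetical caller (this reload flow is illustrative, not part of this diff):

    diff = supervisor.diff_to_active()
    for group_config in diff.removed:  # named field access
        supervisor.remove_process_group(group_config.name)
    for group_config in diff.added:
        supervisor.add_process_group(group_config)
    added, changed, removed = diff  # positional unpacking still works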
@@ -117,7 +81,7 @@ class Supervisor:
         group = self._process_groups[name] = ProcessGroup(config, self._context)
         group.after_setuid()
 
-        notify_event(ProcessGroupAddedEvent(name))
+        EVENT_CALLBACKS.notify(ProcessGroupAddedEvent(name))
         return True
 
     def remove_process_group(self, name: str) -> bool:
@@ -128,7 +92,7 @@ class Supervisor:
 
         del self._process_groups[name]
 
-        notify_event(ProcessGroupRemovedEvent(name))
+        EVENT_CALLBACKS.notify(ProcessGroupRemovedEvent(name))
         return True
 
     def get_process_map(self) -> ta.Dict[int, Dispatcher]:
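Note: the module-level notify_event()/clear_events() functions are replaced throughout by an EVENT_CALLBACKS registry object with notify()/clear() methods. Its implementation lives in ominfra/supervisor/events.py, which this page does not show; a minimal sketch of the shape these call sites imply (the add() method name is an assumption):

    import typing as ta

    class EventCallbacks:
        # Sketch only: just enough to satisfy the .clear()/.notify() call sites above.
        def __init__(self) -> None:
            self._callbacks: ta.List[ta.Callable[[ta.Any], None]] = []

        def add(self, callback: ta.Callable[[ta.Any], None]) -> None:  # assumed name
            self._callbacks.append(callback)

        def clear(self) -> None:
            self._callbacks.clear()

        def notify(self, event: ta.Any) -> None:
            for callback in self._callbacks:
                callback(event)

    EVENT_CALLBACKS = EventCallbacks()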
@@ -157,6 +121,72 @@ class Supervisor:
 
         return unstopped
 
+    #
+
+    def main(self) -> None:
+        self.setup()
+        self.run()
+
+    @cached_nullary
+    def setup(self) -> None:
+        if not self._context.first:
+            # prevent crash on libdispatch-based systems, at least for the first request
+            self._context.cleanup_fds()
+
+        self._context.set_uid_or_exit()
+
+        if self._context.first:
+            self._context.set_rlimits_or_exit()
+
+        # this sets the options.logger object delay logger instantiation until after setuid
+        if not self._context.config.nocleanup:
+            # clean up old automatic logs
+            self._context.clear_auto_child_logdir()
+
+    def run(
+            self,
+            *,
+            callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
+    ) -> None:
+        self._process_groups = {}  # clear
+        self._stop_groups = None  # clear
+
+        EVENT_CALLBACKS.clear()
+
+        try:
+            for config in self._context.config.groups or []:
+                self.add_process_group(config)
+
+            self._context.set_signals()
+
+            if not self._context.config.nodaemon and self._context.first:
+                self._context.daemonize()
+
+            # writing pid file needs to come *after* daemonizing or pid will be wrong
+            self._context.write_pidfile()
+
+            EVENT_CALLBACKS.notify(SupervisorRunningEvent())
+
+            while True:
+                if callback is not None and not callback(self):
+                    break
+
+                self._run_once()
+
+        finally:
+            self._context.cleanup()
+
+    #
+
+    def _run_once(self) -> None:
+        self._poll()
+        self._reap()
+        self._handle_signal()
+        self._tick()
+
+        if self._context.state < SupervisorStates.RUNNING:
+            self._ordered_stop_groups_phase_2()
+
     def _ordered_stop_groups_phase_1(self) -> None:
         if self._stop_groups:
             # stop the last group (the one with the "highest" priority)
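Note: the old monolithic entry point is split into main() = setup() + run(), with setup() made one-shot via @cached_nullary and run() gaining a keyword-only per-iteration callback that ends the loop by returning a falsey value (replacing the old context.test break). A hypothetical embedding, assuming a configured ServerContext named context and a Supervisor(context) constructor:

    remaining = [100]  # run at most 100 loop iterations, then stop

    def stop_after_budget(sup: 'Supervisor') -> bool:
        remaining[0] -= 1
        return remaining[0] > 0  # returning False breaks run()'s while-loop

    sup = Supervisor(context)  # constructor shape assumed
    sup.setup()  # @cached_nullary: repeated calls are no-ops
    sup.run(callback=stop_after_budget)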
@@ -173,110 +203,77 @@ class Supervisor:
             # down, so push it back on to the end of the stop group queue
             self._stop_groups.append(group)
 
-    def
-
-
-
-        while True:
-            combined_map = {}
-            combined_map.update(self.get_process_map())
-
-            pgroups = list(self._process_groups.values())
-            pgroups.sort()
-
-            if self._context.state < SupervisorStates.RUNNING:
-                if not self._stopping:
-                    # first time, set the stopping flag, do a notification and set stop_groups
-                    self._stopping = True
-                    self._stop_groups = pgroups[:]
-                    notify_event(SupervisorStoppingEvent())
-
-                self._ordered_stop_groups_phase_1()
-
-                if not self.shutdown_report():
-                    # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
-                    raise ExitNow
-
-            for fd, dispatcher in combined_map.items():
-                if dispatcher.readable():
-                    self._context.poller.register_readable(fd)
-                if dispatcher.writable():
-                    self._context.poller.register_writable(fd)
-
-            r, w = self._context.poller.poll(timeout)
-
-            for fd in r:
-                if fd in combined_map:
-                    try:
-                        dispatcher = combined_map[fd]
-                        log.debug('read event caused by %r', dispatcher)
-                        dispatcher.handle_read_event()
-                        if not dispatcher.readable():
-                            self._context.poller.unregister_readable(fd)
-                    except ExitNow:
-                        raise
-                    except Exception:  # noqa
-                        combined_map[fd].handle_error()
-                else:
-                    # if the fd is not in combined_map, we should unregister it. otherwise, it will be polled every
-                    # time, which may cause 100% cpu usage
-                    log.debug('unexpected read event from fd %r', fd)
-                    try:
-                        self._context.poller.unregister_readable(fd)
-                    except Exception:  # noqa
-                        pass
-
-            for fd in w:
-                if fd in combined_map:
-                    try:
-                        dispatcher = combined_map[fd]
-                        log.debug('write event caused by %r', dispatcher)
-                        dispatcher.handle_write_event()
-                        if not dispatcher.writable():
-                            self._context.poller.unregister_writable(fd)
-                    except ExitNow:
-                        raise
-                    except Exception:  # noqa
-                        combined_map[fd].handle_error()
-                else:
-                    log.debug('unexpected write event from fd %r', fd)
-                    try:
-                        self._context.poller.unregister_writable(fd)
-                    except Exception:  # noqa
-                        pass
-
-            for group in pgroups:
-                group.transition()
+    def _poll(self) -> None:
+        combined_map = {}
+        combined_map.update(self.get_process_map())
 
-
-
-            self._tick()
+        pgroups = list(self._process_groups.values())
+        pgroups.sort()
 
-
-
+        if self._context.state < SupervisorStates.RUNNING:
+            if not self._stopping:
+                # first time, set the stopping flag, do a notification and set stop_groups
+                self._stopping = True
+                self._stop_groups = pgroups[:]
+                EVENT_CALLBACKS.notify(SupervisorStoppingEvent())
 
-
-                break
+            self._ordered_stop_groups_phase_1()
 
-
-
-
-        if now is None:
-            # now won't be None in unit tests
-            now = time.time()
+            if not self.shutdown_report():
+                # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
+                raise ExitNow
 
-        for
-
+        for fd, dispatcher in combined_map.items():
+            if dispatcher.readable():
+                self._context.poller.register_readable(fd)
+            if dispatcher.writable():
+                self._context.poller.register_writable(fd)
 
-
-
-
-
+        timeout = 1  # this cannot be fewer than the smallest TickEvent (5)
+        r, w = self._context.poller.poll(timeout)
+
+        for fd in r:
+            if fd in combined_map:
+                try:
+                    dispatcher = combined_map[fd]
+                    log.debug('read event caused by %r', dispatcher)
+                    dispatcher.handle_read_event()
+                    if not dispatcher.readable():
+                        self._context.poller.unregister_readable(fd)
+                except ExitNow:
+                    raise
+                except Exception:  # noqa
+                    combined_map[fd].handle_error()
+            else:
+                # if the fd is not in combined_map, we should unregister it. otherwise, it will be polled every
+                # time, which may cause 100% cpu usage
+                log.debug('unexpected read event from fd %r', fd)
+                try:
+                    self._context.poller.unregister_readable(fd)
+                except Exception:  # noqa
+                    pass
+
+        for fd in w:
+            if fd in combined_map:
+                try:
+                    dispatcher = combined_map[fd]
+                    log.debug('write event caused by %r', dispatcher)
+                    dispatcher.handle_write_event()
+                    if not dispatcher.writable():
+                        self._context.poller.unregister_writable(fd)
+                except ExitNow:
+                    raise
+                except Exception:  # noqa
+                    combined_map[fd].handle_error()
+            else:
+                log.debug('unexpected write event from fd %r', fd)
+                try:
+                    self._context.poller.unregister_writable(fd)
+                except Exception:  # noqa
+                    pass
 
-
-
-            self._ticks[period] = this_tick
-            notify_event(event(this_tick, self))
+        for group in pgroups:
+            group.transition()
 
     def _reap(self, *, once: bool = False, depth: int = 0) -> None:
         if depth >= 100:
@@ -326,51 +323,22 @@ class Supervisor:
             else:
                 log.debug('received %s indicating nothing', signame(sig))
 
+    def _tick(self, now: ta.Optional[float] = None) -> None:
+        """Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
 
-
-
-
-
-def main(args=None, test=False):
-    import argparse
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('config_file', metavar='config-file')
-    args = parser.parse_args()
-
-    configure_standard_logging('INFO')
-
-    if not (cf := args.config_file):
-        raise RuntimeError('No config file specified')
-
-    # if we hup, restart by making a new Supervisor()
-    first = True
-    while True:
-        with open(cf) as f:
-            config_src = f.read()
-        config_dct = json.loads(config_src)
-        config: ServerConfig = unmarshal_obj(config_dct, ServerConfig)
-
-        context = ServerContext(
-            config,
-        )
-
-        context.first = first
-        context.test = test
-        go(context)
-        # options.close_logger()
-        first = False
-        if test or (context.state < SupervisorStates.RESTARTING):
-            break
-
+        if now is None:
+            # now won't be None in unit tests
+            now = time.time()
 
-
-
-    try:
-        d.main()
-    except ExitNow:
-        pass
+        for event in TICK_EVENTS:
+            period = event.period  # type: ignore
 
+            last_tick = self._ticks.get(period)
+            if last_tick is None:
+                # we just started up
+                last_tick = self._ticks[period] = timeslice(period, now)
 
-
-
+            this_tick = timeslice(period, now)
+            if this_tick != last_tick:
+                self._ticks[period] = this_tick
+                EVENT_CALLBACKS.notify(event(this_tick, self))
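Note: with timeslice() factored out, _tick()'s rollover rule is easy to restate in isolation. An illustrative re-statement mirroring the logic above, for a 5-second period:

    ticks: dict = {}

    def tick_fires(period: int, now: float) -> bool:
        # Mirrors _tick: prime on first sight, fire only when the bucket changes.
        last = ticks.get(period)
        if last is None:
            ticks[period] = timeslice(period, now)
            return False  # just started up: prime last_tick, don't fire
        this = timeslice(period, now)
        if this != last:
            ticks[period] = this
            return True
        return False

    assert tick_fires(5, 103.2) is False  # primes last_tick = 100
    assert tick_fires(5, 104.9) is False  # still inside the same 5s window
    assert tick_fires(5, 105.0) is True   # window rolled over, event fires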
{ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ominfra
-Version: 0.0.0.dev118
+Version: 0.0.0.dev120
 Summary: ominfra
 Author: wrmsr
 License: BSD-3-Clause
@@ -12,8 +12,8 @@ Classifier: Operating System :: OS Independent
 Classifier: Operating System :: POSIX
 Requires-Python: >=3.12
 License-File: LICENSE
-Requires-Dist: omdev ==0.0.0.dev118
-Requires-Dist: omlish ==0.0.0.dev118
+Requires-Dist: omdev ==0.0.0.dev120
+Requires-Dist: omlish ==0.0.0.dev120
 Provides-Extra: all
 Requires-Dist: paramiko ~=3.5 ; extra == 'all'
 Requires-Dist: asyncssh ~=2.18 ; extra == 'all'
{ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/RECORD
CHANGED
@@ -20,7 +20,7 @@ ominfra/clouds/aws/journald2aws/poster.py,sha256=hz1XuctW8GtLmfjhRvCFY6py52D4BzX
 ominfra/clouds/gcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/clouds/gcp/auth.py,sha256=3PyfRJNgajjMqJFem3SKui0CqGeHEsZlvbRhuxFcZG8,1348
 ominfra/deploy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/deploy/_executor.py,sha256=
+ominfra/deploy/_executor.py,sha256=YnEw91L55EXvR51UTl95FtXFUe3rUmidlkD4qccE7Tg,34211
 ominfra/deploy/configs.py,sha256=qi0kwT7G2NH7dXLOQic-u6R3yeadup_QtvrjwWIggbM,435
 ominfra/deploy/remote.py,sha256=6ACmpXU1uBdyGs3Xsp97ktKFq30cJlzN9LRWNUWlGY4,2144
 ominfra/deploy/executor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
@@ -35,7 +35,7 @@ ominfra/deploy/executor/concerns/systemd.py,sha256=MtsSEToEa1HNouern_JukcYTnypw_
 ominfra/deploy/executor/concerns/user.py,sha256=j5LDfQXquIp-eEM7t6aShsrYoQrM_ILXZycTmTcRVxA,686
 ominfra/deploy/executor/concerns/venv.py,sha256=jbRriqJHO4r9Zyo5Hfl_qVmcU6Qm6UgrouBroKcPn2g,775
 ominfra/deploy/poly/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
-ominfra/deploy/poly/_main.py,sha256=
+ominfra/deploy/poly/_main.py,sha256=iKhlTayS2j3Ra_eCSK6Qro8j4H573DLoPw8490RMIA8,24194
 ominfra/deploy/poly/base.py,sha256=Bd-CzUTaDvTRbdXKiTxMxs77WCEXItwNoBYCRnTk1u4,4167
 ominfra/deploy/poly/configs.py,sha256=9bzWdbxhOk_Q4KokDjmRz254KHnUU71Vl1frLlhQyU4,584
 ominfra/deploy/poly/deploy.py,sha256=tMYKslXLjstcv86siRt5j37USsS0Wd6lsfeGRE26zio,544
@@ -47,40 +47,42 @@ ominfra/deploy/poly/site.py,sha256=QJwDDJoVm2-kxi4bxIrp-mn4y2qDLuW3CAUax3W8gv8,2
 ominfra/deploy/poly/supervisor.py,sha256=zkl6VQBcAZaMAhyR9DbbbqULcgFCDZoe9S_vP-mMFQ8,2289
 ominfra/deploy/poly/venv.py,sha256=BoipDEa4NTeodjf3L57KJfq9eGKLagFNKwD8pS4yrzA,1552
 ominfra/journald/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ominfra/journald/fields.py,sha256=NjjVn7GW4jkcGdyiiizVjEfQqSFnolXYk3kDcSQcMmc,12278
 ominfra/journald/genmessages.py,sha256=rLTS-K2v7otNOtTz4RoOEVYCm0fQuuBzf47e0T61tA8,1857
 ominfra/journald/messages.py,sha256=2iMY4k63XGNcN3LPvBmmK55ftjupnNh8f_ijlW9mkhQ,2208
-ominfra/journald/tailer.py,sha256=
+ominfra/journald/tailer.py,sha256=5abcFMfgi7fnY9ZEQe2ZVobaJxjQkeu6d9Kagw33a1w,33525
 ominfra/manage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/manage/manage.py,sha256=BttL8LFEknHZE_h2Pt5dAqbfUkv6qy43WI0raXBZ1a8,151
 ominfra/pyremote/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/pyremote/_runcommands.py,sha256=
+ominfra/pyremote/_runcommands.py,sha256=XWBVKBE6QOlzue9lu5_wFdySt_pJ1sOj7N9KTjawW-A,28047
 ominfra/pyremote/bootstrap.py,sha256=RvMO3YGaN1E4sgUi1JEtiPak8cjvqtc_vRCq1yqbeZg,3370
 ominfra/pyremote/runcommands.py,sha256=bviS0_TDIoZVAe4h-_iavbvJtVSFu8lnk7fQ5iasCWE,1571
 ominfra/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/scripts/journald2aws.py,sha256=
-ominfra/scripts/supervisor.py,sha256=
+ominfra/scripts/journald2aws.py,sha256=0HHYi_uBV1t2KefVrExs3IZ6Zy-mQa7xN_ka9W9Obb8,94910
+ominfra/scripts/supervisor.py,sha256=C1eT7pIqPRJgN4U87tOjR_SuOSUwd5aUswvPeTy5Xlw,121831
 ominfra/supervisor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
-ominfra/supervisor/__main__.py,sha256=
+ominfra/supervisor/__main__.py,sha256=I0yFw-C08OOiZ3BF6lF1Oiv789EQXu-_j6whDhQUTEA,66
 ominfra/supervisor/compat.py,sha256=Y1d_pk4eN18AbVYjDHAXMMnPwOKTFpc7JDb1uClYMsQ,5064
 ominfra/supervisor/configs.py,sha256=KpibZJ-V-4UpoJM2fnjXOXJLvDbwRJzNLXLGESUljV4,2966
-ominfra/supervisor/context.py,sha256=
+ominfra/supervisor/context.py,sha256=Fg_wD6oUR8vxe3JMM14mR-5ssIrTNwxRr-AXfoGbzJQ,15795
 ominfra/supervisor/datatypes.py,sha256=UnXO_UlCyJD9u0uvea1wvnk_UZCzxNMeFvPK83gv530,4432
-ominfra/supervisor/dispatchers.py,sha256=
-ominfra/supervisor/events.py,sha256=
+ominfra/supervisor/dispatchers.py,sha256=sJ61yTo9EEbxHwe2NbzOTAFFFCuuyIhYli_xJioQBoo,10423
+ominfra/supervisor/events.py,sha256=IhdL7Fj-hEvTvZ5WF6aIa2YjSPQhuUoasoJSMmRLQkU,7650
 ominfra/supervisor/exceptions.py,sha256=Qbu211H3CLlSmi9LsSikOwrcL5HgJP9ugvcKWlGTAoI,750
-ominfra/supervisor/
-ominfra/supervisor/
+ominfra/supervisor/main.py,sha256=0bj_9AzIfDlB1BB8zcX9npQIknamN7FGoVEYgLMLuP0,1701
+ominfra/supervisor/poller.py,sha256=VCBxLItfA4Vj69jet2XFbFScPbmdD9JA1evaofk_AnY,7709
+ominfra/supervisor/process.py,sha256=94cglin7qBwxTNXjOBxqec4qsACu-VfeboW-JfzvvbE,31454
 ominfra/supervisor/states.py,sha256=JMxXYTZhJkMNQZ2tTV6wId7wrvnWgiZteskACprKskM,1374
-ominfra/supervisor/supervisor.py,sha256=
+ominfra/supervisor/supervisor.py,sha256=p612yph5aKzkVLH6CfOMcRHEbNfs_TMlmjfLwtG8Jo0,12324
 ominfra/supervisor/types.py,sha256=ec62QG0CDJc0XNxCnf3lXxhsxrr4CCScLPI-1SpQjlc,1141
 ominfra/tailscale/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/tailscale/api.py,sha256=C5-t_b6jZXUWcy5k8bXm7CFnk73pSdrlMOgGDeGVrpw,1370
 ominfra/tailscale/cli.py,sha256=DSGp4hn5xwOW-l_u_InKlSF6kIobxtUtVssf_73STs0,3567
 ominfra/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/tools/listresources.py,sha256=4qVg5txsb10EHhvqXXeM6gJ2jx9LbroEnPydDv1uXs0,6176
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
-ominfra-0.0.0.
+ominfra-0.0.0.dev120.dist-info/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
+ominfra-0.0.0.dev120.dist-info/METADATA,sha256=xuFwZN7LnvBCDHvhqgdKONRKkDYUYJ6SZqTjaE4k2rI,742
+ominfra-0.0.0.dev120.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ominfra-0.0.0.dev120.dist-info/entry_points.txt,sha256=kgecQ2MgGrM9qK744BoKS3tMesaC3yjLnl9pa5CRczg,37
+ominfra-0.0.0.dev120.dist-info/top_level.txt,sha256=E-b2OHkk_AOBLXHYZQ2EOFKl-_6uOGd8EjeG-Zy6h_w,8
+ominfra-0.0.0.dev120.dist-info/RECORD,,
{ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/LICENSE
File without changes
{ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/WHEEL
File without changes
{ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/entry_points.txt
File without changes
{ominfra-0.0.0.dev118.dist-info → ominfra-0.0.0.dev120.dist-info}/top_level.txt
File without changes