ominfra 0.0.0.dev119__py3-none-any.whl → 0.0.0.dev121__py3-none-any.whl

@@ -1,4 +1,5 @@
  # ruff: noqa: UP006 UP007
+ import dataclasses as dc
  import signal
  import time
  import typing as ta
@@ -14,13 +15,12 @@ from .compat import signame
  from .configs import ProcessGroupConfig
  from .context import ServerContext
  from .dispatchers import Dispatcher
+ from .events import EVENT_CALLBACKS
  from .events import TICK_EVENTS
  from .events import ProcessGroupAddedEvent
  from .events import ProcessGroupRemovedEvent
  from .events import SupervisorRunningEvent
  from .events import SupervisorStoppingEvent
- from .events import clear_events
- from .events import notify_event
  from .process import ProcessGroup
  from .process import Subprocess
  from .states import SupervisorState
@@ -28,22 +28,44 @@ from .states import SupervisorStates
  from .states import get_process_state_description


- def timeslice(period, when):
+ def timeslice(period: int, when: float) -> int:
      return int(when - (when % period))


+ @dc.dataclass(frozen=True)
+ class ProcessGroupFactory:
+     fn: ta.Callable[[ProcessGroupConfig], ProcessGroup]
+
+     def __call__(self, config: ProcessGroupConfig) -> ProcessGroup:
+         return self.fn(config)
+
+
  class Supervisor:

-     def __init__(self, context: ServerContext) -> None:
+     def __init__(
+             self,
+             context: ServerContext,
+             *,
+             process_group_factory: ta.Optional[ProcessGroupFactory] = None,
+     ) -> None:
          super().__init__()

          self._context = context
+
+         if process_group_factory is None:
+             def make_process_group(config: ProcessGroupConfig) -> ProcessGroup:
+                 return ProcessGroup(config, self._context)
+             process_group_factory = ProcessGroupFactory(make_process_group)
+         self._process_group_factory = process_group_factory
+
          self._ticks: ta.Dict[int, float] = {}
          self._process_groups: ta.Dict[str, ProcessGroup] = {}  # map of process group name to process group object
          self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
          self._stopping = False  # set after we detect that we are handling a stop request
          self._last_shutdown_report = 0.  # throttle for delayed process error reports at stop

+     #
+
      @property
      def context(self) -> ServerContext:
          return self._context
@@ -51,58 +73,7 @@ class Supervisor:
      def get_state(self) -> SupervisorState:
          return self._context.state

-     def main(self) -> None:
-         self.setup()
-         self.run()
-
-     @cached_nullary
-     def setup(self) -> None:
-         if not self._context.first:
-             # prevent crash on libdispatch-based systems, at least for the first request
-             self._context.cleanup_fds()
-
-         self._context.set_uid_or_exit()
-
-         if self._context.first:
-             self._context.set_rlimits_or_exit()
-
-         # this sets the options.logger object delay logger instantiation until after setuid
-         if not self._context.config.nocleanup:
-             # clean up old automatic logs
-             self._context.clear_auto_child_logdir()
-
-     def run(
-             self,
-             *,
-             callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
-     ) -> None:
-         self._process_groups = {}  # clear
-         self._stop_groups = None  # clear
-
-         clear_events()
-
-         try:
-             for config in self._context.config.groups or []:
-                 self.add_process_group(config)
-
-             self._context.set_signals()
-
-             if not self._context.config.nodaemon and self._context.first:
-                 self._context.daemonize()
-
-             # writing pid file needs to come *after* daemonizing or pid will be wrong
-             self._context.write_pidfile()
-
-             notify_event(SupervisorRunningEvent())
-
-             while True:
-                 if callback is not None and not callback(self):
-                     break
-
-                 self._run_once()
-
-         finally:
-             self._context.cleanup()
+     #

      class DiffToActive(ta.NamedTuple):
          added: ta.List[ProcessGroupConfig]
@@ -128,10 +99,10 @@
          if name in self._process_groups:
              return False

-         group = self._process_groups[name] = ProcessGroup(config, self._context)
+         group = self._process_groups[name] = self._process_group_factory(config)
          group.after_setuid()

-         notify_event(ProcessGroupAddedEvent(name))
+         EVENT_CALLBACKS.notify(ProcessGroupAddedEvent(name))
          return True

      def remove_process_group(self, name: str) -> bool:
@@ -142,7 +113,7 @@

          del self._process_groups[name]

-         notify_event(ProcessGroupRemovedEvent(name))
+         EVENT_CALLBACKS.notify(ProcessGroupRemovedEvent(name))
          return True

      def get_process_map(self) -> ta.Dict[int, Dispatcher]:
@@ -171,6 +142,72 @@

          return unstopped

+     #
+
+     def main(self) -> None:
+         self.setup()
+         self.run()
+
+     @cached_nullary
+     def setup(self) -> None:
+         if not self._context.first:
+             # prevent crash on libdispatch-based systems, at least for the first request
+             self._context.cleanup_fds()
+
+         self._context.set_uid_or_exit()
+
+         if self._context.first:
+             self._context.set_rlimits_or_exit()
+
+         # this sets the options.logger object delay logger instantiation until after setuid
+         if not self._context.config.nocleanup:
+             # clean up old automatic logs
+             self._context.clear_auto_child_logdir()
+
+     def run(
+             self,
+             *,
+             callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
+     ) -> None:
+         self._process_groups = {}  # clear
+         self._stop_groups = None  # clear
+
+         EVENT_CALLBACKS.clear()
+
+         try:
+             for config in self._context.config.groups or []:
+                 self.add_process_group(config)
+
+             self._context.set_signals()
+
+             if not self._context.config.nodaemon and self._context.first:
+                 self._context.daemonize()
+
+             # writing pid file needs to come *after* daemonizing or pid will be wrong
+             self._context.write_pidfile()
+
+             EVENT_CALLBACKS.notify(SupervisorRunningEvent())
+
+             while True:
+                 if callback is not None and not callback(self):
+                     break
+
+                 self._run_once()
+
+         finally:
+             self._context.cleanup()
+
+     #
+
+     def _run_once(self) -> None:
+         self._poll()
+         self._reap()
+         self._handle_signal()
+         self._tick()
+
+         if self._context.state < SupervisorStates.RUNNING:
+             self._ordered_stop_groups_phase_2()
+
      def _ordered_stop_groups_phase_1(self) -> None:
          if self._stop_groups:
              # stop the last group (the one with the "highest" priority)
@@ -187,7 +224,7 @@
                  # down, so push it back on to the end of the stop group queue
                  self._stop_groups.append(group)

-     def _run_once(self) -> None:
+     def _poll(self) -> None:
          combined_map = {}
          combined_map.update(self.get_process_map())

@@ -199,7 +236,7 @@
                  # first time, set the stopping flag, do a notification and set stop_groups
                  self._stopping = True
                  self._stop_groups = pgroups[:]
-                 notify_event(SupervisorStoppingEvent())
+                 EVENT_CALLBACKS.notify(SupervisorStoppingEvent())

              self._ordered_stop_groups_phase_1()

@@ -259,33 +296,6 @@
          for group in pgroups:
              group.transition()

-         self._reap()
-         self._handle_signal()
-         self._tick()
-
-         if self._context.state < SupervisorStates.RUNNING:
-             self._ordered_stop_groups_phase_2()
-
-     def _tick(self, now: ta.Optional[float] = None) -> None:
-         """Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
-
-         if now is None:
-             # now won't be None in unit tests
-             now = time.time()
-
-         for event in TICK_EVENTS:
-             period = event.period  # type: ignore
-
-             last_tick = self._ticks.get(period)
-             if last_tick is None:
-                 # we just started up
-                 last_tick = self._ticks[period] = timeslice(period, now)
-
-             this_tick = timeslice(period, now)
-             if this_tick != last_tick:
-                 self._ticks[period] = this_tick
-                 notify_event(event(this_tick, self))
-
      def _reap(self, *, once: bool = False, depth: int = 0) -> None:
          if depth >= 100:
              return
@@ -333,3 +343,23 @@

          else:
              log.debug('received %s indicating nothing', signame(sig))
+
+     def _tick(self, now: ta.Optional[float] = None) -> None:
+         """Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
+
+         if now is None:
+             # now won't be None in unit tests
+             now = time.time()
+
+         for event in TICK_EVENTS:
+             period = event.period  # type: ignore
+
+             last_tick = self._ticks.get(period)
+             if last_tick is None:
+                 # we just started up
+                 last_tick = self._ticks[period] = timeslice(period, now)
+
+             this_tick = timeslice(period, now)
+             if this_tick != last_tick:
+                 self._ticks[period] = this_tick
+                 EVENT_CALLBACKS.notify(event(this_tick, self))
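Note on the Supervisor hunks above: process group construction is now injectable via the new ProcessGroupFactory dataclass, and the module-level clear_events()/notify_event() helpers are replaced by the EVENT_CALLBACKS registry. A minimal, hedged sketch of the new constructor keyword; how the ServerContext itself gets built is outside this diff, so ctx is left as a placeholder:

```python
from ominfra.supervisor.configs import ProcessGroupConfig
from ominfra.supervisor.context import ServerContext
from ominfra.supervisor.process import ProcessGroup
from ominfra.supervisor.supervisor import ProcessGroupFactory
from ominfra.supervisor.supervisor import Supervisor

ctx: ServerContext = ...  # placeholder: constructed by the caller, as before this change


def make_group(config: ProcessGroupConfig) -> ProcessGroup:
    # Wrap, instrument, or substitute group construction here, then delegate to the default type.
    return ProcessGroup(config, ctx)


sup = Supervisor(ctx, process_group_factory=ProcessGroupFactory(make_group))
sup.main()  # main() is setup() followed by run(), per the hunk above
```

Omitting process_group_factory keeps the old behavior: the default factory builds ProcessGroup(config, self._context) exactly as add_process_group did before.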
@@ -27,6 +27,11 @@ class AbstractServerContext(abc.ABC):
      def pid_history(self) -> ta.Dict[int, 'AbstractSubprocess']:
          raise NotImplementedError

+     @property
+     @abc.abstractmethod
+     def inherited_fds(self) -> ta.FrozenSet[int]:
+         raise NotImplementedError
+

  class AbstractSubprocess(abc.ABC):
      @property
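The hunk above (presumably ominfra/supervisor/types.py, whose hash also changes in the RECORD below) adds an abstract inherited_fds property, so every concrete server context must now report the file descriptors it inherited. A hedged sketch of satisfying just that member; the class's other abstract members (pid_history, etc.) are elided:

```python
import typing as ta


class MyServerContext(AbstractServerContext):  # remaining abstract members omitted for brevity
    def __init__(self, inherited_fds: ta.Iterable[int] = ()) -> None:
        super().__init__()
        self._inherited_fds = frozenset(inherited_fds)

    @property
    def inherited_fds(self) -> ta.FrozenSet[int]:
        return self._inherited_fds
```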
ominfra/threadworkers.py CHANGED
@@ -1,9 +1,11 @@
  # ruff: noqa: UP006 UP007
  # @omlish-lite
  """
+ FIXME:
+  - group is racy af - meditate on has_started, etc
+
  TODO:
-  - implement stop lol
-  - collective heartbeat monitoring - ThreadWorkerGroups
+  - overhaul stop lol
   - group -> 'context'? :|
   - shared stop_event?
  """
@@ -29,6 +31,7 @@ class ThreadWorker(ExitStacked, abc.ABC):
              self,
              *,
              stop_event: ta.Optional[threading.Event] = None,
+             worker_groups: ta.Optional[ta.Iterable['ThreadWorkerGroup']] = None,
      ) -> None:
          super().__init__()

@@ -40,6 +43,9 @@ class ThreadWorker(ExitStacked, abc.ABC):
          self._thread: ta.Optional[threading.Thread] = None
          self._last_heartbeat: ta.Optional[float] = None

+         for g in worker_groups or []:
+             g.add(self)
+
      #

      def __enter__(self: ThreadWorkerT) -> ThreadWorkerT:
@@ -84,13 +90,13 @@ class ThreadWorker(ExitStacked, abc.ABC):
          if self._thread is not None:
              raise RuntimeError('Thread already started: %r', self)

-         thr = threading.Thread(target=self.__run)
+         thr = threading.Thread(target=self.__thread_main)
          self._thread = thr
          thr.start()

      #

-     def __run(self) -> None:
+     def __thread_main(self) -> None:
          try:
              self._run()
          except ThreadWorker.Stopping:
108
114
  def stop(self) -> None:
109
115
  self._stop_event.set()
110
116
 
111
- def join(self, timeout: ta.Optional[float] = None) -> None:
117
+ def join(
118
+ self,
119
+ timeout: ta.Optional[float] = None,
120
+ *,
121
+ unless_not_started: bool = False,
122
+ ) -> None:
112
123
  with self._lock:
113
124
  if self._thread is None:
114
- raise RuntimeError('Thread not started: %r', self)
125
+ if not unless_not_started:
126
+ raise RuntimeError('Thread not started: %r', self)
127
+ return
115
128
  self._thread.join(timeout)
116
129
 
117
130
 
@@ -120,20 +133,64 @@

  class ThreadWorkerGroup:
      @dc.dataclass()
-     class State:
+     class _State:
          worker: ThreadWorker

+         last_heartbeat: ta.Optional[float] = None
+
      def __init__(self) -> None:
          super().__init__()

          self._lock = threading.RLock()
-         self._states: ta.Dict[ThreadWorker, ThreadWorkerGroup.State] = {}
+         self._states: ta.Dict[ThreadWorker, ThreadWorkerGroup._State] = {}
+         self._last_heartbeat_check: ta.Optional[float] = None
+
+     #

      def add(self, *workers: ThreadWorker) -> 'ThreadWorkerGroup':
          with self._lock:
              for w in workers:
                  if w in self._states:
                      raise KeyError(w)
-                 self._states[w] = ThreadWorkerGroup.State(w)
+                 self._states[w] = ThreadWorkerGroup._State(w)

          return self
+
+     #
+
+     def start_all(self) -> None:
+         thrs = list(self._states)
+         with self._lock:
+             for thr in thrs:
+                 if not thr.has_started():
+                     thr.start()
+
+     def stop_all(self) -> None:
+         for w in reversed(list(self._states)):
+             if w.has_started():
+                 w.stop()
+
+     def join_all(self, timeout: ta.Optional[float] = None) -> None:
+         for w in reversed(list(self._states)):
+             if w.has_started():
+                 w.join(timeout, unless_not_started=True)
+
+     #
+
+     def get_dead(self) -> ta.List[ThreadWorker]:
+         with self._lock:
+             return [thr for thr in self._states if not thr.is_alive()]
+
+     def check_heartbeats(self) -> ta.Dict[ThreadWorker, float]:
+         with self._lock:
+             dct: ta.Dict[ThreadWorker, float] = {}
+             for thr, st in self._states.items():
+                 if not thr.has_started():
+                     continue
+                 hb = thr.last_heartbeat
+                 if hb is None:
+                     hb = time.time()
+                 st.last_heartbeat = hb
+                 dct[st.worker] = time.time() - hb
+             self._last_heartbeat_check = time.time()
+             return dct
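A rough usage sketch of the reworked ThreadWorkerGroup surface. Only add(), start_all(), stop_all(), join_all(), get_dead(), and check_heartbeats() come from the diff above; the PollWorker subclass, its use of the protected _stop_event, and the assumption that concrete workers implement _run() are illustrative guesses:

```python
import time

from ominfra.threadworkers import ThreadWorker
from ominfra.threadworkers import ThreadWorkerGroup


class PollWorker(ThreadWorker):
    def _run(self) -> None:
        # Spin until stop() sets the shared stop event.
        while not self._stop_event.is_set():
            time.sleep(1.)


group = ThreadWorkerGroup()
group.add(PollWorker(), PollWorker())  # or pass worker_groups=[group] to the worker constructor

group.start_all()
try:
    while not group.get_dead():
        staleness = group.check_heartbeats()  # seconds since each started worker's last heartbeat
        time.sleep(5.)
finally:
    group.stop_all()
    group.join_all(timeout=10.)
```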
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ominfra
- Version: 0.0.0.dev119
+ Version: 0.0.0.dev121
  Summary: ominfra
  Author: wrmsr
  License: BSD-3-Clause
@@ -12,8 +12,8 @@ Classifier: Operating System :: OS Independent
  Classifier: Operating System :: POSIX
  Requires-Python: >=3.12
  License-File: LICENSE
- Requires-Dist: omdev ==0.0.0.dev119
- Requires-Dist: omlish ==0.0.0.dev119
+ Requires-Dist: omdev ==0.0.0.dev121
+ Requires-Dist: omlish ==0.0.0.dev121
  Provides-Extra: all
  Requires-Dist: paramiko ~=3.5 ; extra == 'all'
  Requires-Dist: asyncssh ~=2.18 ; extra == 'all'
@@ -2,8 +2,9 @@ ominfra/.manifests.json,sha256=8KREXxMAlsilZOktXPYru1ND3V5hFI22vnrp6hT3bio,589
  ominfra/__about__.py,sha256=6i1AoruFYQCd-PyhhbDQDWY2d1tiQu9nkwWr-fXAqfY,705
  ominfra/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/cmds.py,sha256=E0AfnvEmnKntXWvmLW5L05_NeDpBET1VBXn7vV6EwBQ,2083
+ ominfra/configs.py,sha256=8aU1Qmbr-qjaE2iP3gAbA2SWJYMPZ-uGK007L01PoOI,1727
  ominfra/ssh.py,sha256=jQpc4WvkMckIfk4vILda8zFaeharRqc_6wxW50b0OjQ,5431
- ominfra/threadworkers.py,sha256=QuRpz9Yjyb4F8_IjzqmL7eNCAmXZfy3XLl7QoVF7Ohw,3273
+ ominfra/threadworkers.py,sha256=oX4ubZn7h932saXpRIJu2MNhBExgGGMuGhdXarZxLJw,4948
  ominfra/clouds/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/clouds/aws/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/clouds/aws/__main__.py,sha256=HXMoxEl9KHhv6zOOPQxiJAftfR2SjBqeVTYw-og9aFw,163
@@ -13,14 +14,15 @@ ominfra/clouds/aws/dataclasses.py,sha256=rKhtJKJ0JhMssU9n9CABX_JaUiokIboEATJ9TZg
  ominfra/clouds/aws/logs.py,sha256=z9ouU2IYXNHsl7_Whbjs1FGtlUwsEq0RV8LNrM_QNTE,5471
  ominfra/clouds/aws/metadata.py,sha256=XR1BuMdQheyeFjjA3MN8GCNWVAp5ahoPdbWXEmViutQ,2767
  ominfra/clouds/aws/journald2aws/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
+ ominfra/clouds/aws/journald2aws/__main__.py,sha256=d23loR_cKfTYZwYiqpt_CmKI7dd5WcYFgIYzqMep75E,68
  ominfra/clouds/aws/journald2aws/cursor.py,sha256=tQ7O6BHlEdaalbiI_Rqagj0aHfdtTQ_ZJwdOSRUjNvQ,1173
- ominfra/clouds/aws/journald2aws/driver.py,sha256=8jiuEpOgKFSucpEJBBTBiSVg6L_tA4alUNK-I788HWU,5452
- ominfra/clouds/aws/journald2aws/main.py,sha256=xFkEhkYKtFfW0XRfY0UaX_gd_FU66WTZOMCyiIaPY3E,2237
+ ominfra/clouds/aws/journald2aws/driver.py,sha256=E9RhdMzDXxOnDfLyHqU7N_7iygXqxy4apq2xz_EDxkA,6127
+ ominfra/clouds/aws/journald2aws/main.py,sha256=RQJhk4aPtnp4EHzC-ST1Rs9BN6D7bqQQVjCRxGU7JuQ,2147
  ominfra/clouds/aws/journald2aws/poster.py,sha256=hz1XuctW8GtLmfjhRvCFY6py52D4BzXHYny5XKFpHSA,2833
  ominfra/clouds/gcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/clouds/gcp/auth.py,sha256=3PyfRJNgajjMqJFem3SKui0CqGeHEsZlvbRhuxFcZG8,1348
  ominfra/deploy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ominfra/deploy/_executor.py,sha256=YnEw91L55EXvR51UTl95FtXFUe3rUmidlkD4qccE7Tg,34211
+ ominfra/deploy/_executor.py,sha256=7-A5aScQKhkvh1RgMHBYhccIXECaSHEf0Cv0tqIE_DY,34215
  ominfra/deploy/configs.py,sha256=qi0kwT7G2NH7dXLOQic-u6R3yeadup_QtvrjwWIggbM,435
  ominfra/deploy/remote.py,sha256=6ACmpXU1uBdyGs3Xsp97ktKFq30cJlzN9LRWNUWlGY4,2144
  ominfra/deploy/executor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
@@ -35,7 +37,7 @@ ominfra/deploy/executor/concerns/systemd.py,sha256=MtsSEToEa1HNouern_JukcYTnypw_
  ominfra/deploy/executor/concerns/user.py,sha256=j5LDfQXquIp-eEM7t6aShsrYoQrM_ILXZycTmTcRVxA,686
  ominfra/deploy/executor/concerns/venv.py,sha256=jbRriqJHO4r9Zyo5Hfl_qVmcU6Qm6UgrouBroKcPn2g,775
  ominfra/deploy/poly/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
- ominfra/deploy/poly/_main.py,sha256=iKhlTayS2j3Ra_eCSK6Qro8j4H573DLoPw8490RMIA8,24194
+ ominfra/deploy/poly/_main.py,sha256=N-ajqT7UPdacBbFViTyzDkbM8y2kyH4lzGKsV4lN7uo,24198
  ominfra/deploy/poly/base.py,sha256=Bd-CzUTaDvTRbdXKiTxMxs77WCEXItwNoBYCRnTk1u4,4167
  ominfra/deploy/poly/configs.py,sha256=9bzWdbxhOk_Q4KokDjmRz254KHnUU71Vl1frLlhQyU4,584
  ominfra/deploy/poly/deploy.py,sha256=tMYKslXLjstcv86siRt5j37USsS0Wd6lsfeGRE26zio,544
@@ -54,35 +56,35 @@ ominfra/journald/tailer.py,sha256=5abcFMfgi7fnY9ZEQe2ZVobaJxjQkeu6d9Kagw33a1w,33
  ominfra/manage/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/manage/manage.py,sha256=BttL8LFEknHZE_h2Pt5dAqbfUkv6qy43WI0raXBZ1a8,151
  ominfra/pyremote/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ominfra/pyremote/_runcommands.py,sha256=XWBVKBE6QOlzue9lu5_wFdySt_pJ1sOj7N9KTjawW-A,28047
+ ominfra/pyremote/_runcommands.py,sha256=g-e_vL1if4ls-GokLpUdAlwLQFTEwtJwsc31Vgb1uw8,28051
  ominfra/pyremote/bootstrap.py,sha256=RvMO3YGaN1E4sgUi1JEtiPak8cjvqtc_vRCq1yqbeZg,3370
  ominfra/pyremote/runcommands.py,sha256=bviS0_TDIoZVAe4h-_iavbvJtVSFu8lnk7fQ5iasCWE,1571
  ominfra/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- ominfra/scripts/journald2aws.py,sha256=0HHYi_uBV1t2KefVrExs3IZ6Zy-mQa7xN_ka9W9Obb8,94910
- ominfra/scripts/supervisor.py,sha256=31TqubTvWBa3FXNew9hM4aDUqyw_5Pl4DmJZOckiIGc,121636
+ ominfra/scripts/journald2aws.py,sha256=PAhsys4Ya_FERZ6fcYWJDqTNB2PNfE-dpZwaq4tw2nI,128130
+ ominfra/scripts/supervisor.py,sha256=zk8oHSBar7wCW890NBELECAZxQ5oYhRjHvqhYnSxBmM,172749
  ominfra/supervisor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
  ominfra/supervisor/__main__.py,sha256=I0yFw-C08OOiZ3BF6lF1Oiv789EQXu-_j6whDhQUTEA,66
- ominfra/supervisor/compat.py,sha256=Y1d_pk4eN18AbVYjDHAXMMnPwOKTFpc7JDb1uClYMsQ,5064
- ominfra/supervisor/configs.py,sha256=KpibZJ-V-4UpoJM2fnjXOXJLvDbwRJzNLXLGESUljV4,2966
- ominfra/supervisor/context.py,sha256=Fg_wD6oUR8vxe3JMM14mR-5ssIrTNwxRr-AXfoGbzJQ,15795
+ ominfra/supervisor/compat.py,sha256=mutfnQbSCDaE7TSuQArOcFdfGVw4uEE_E04rIhs1IqU,5312
+ ominfra/supervisor/configs.py,sha256=TtVyWdxinrd3tueM6q8j2YVcEqauFTJqlbykkSjByEo,3524
+ ominfra/supervisor/context.py,sha256=Ss4xqnLe8LafIcg10oSr8KyqAJZxjofgNDB_pVoyBA8,16164
  ominfra/supervisor/datatypes.py,sha256=UnXO_UlCyJD9u0uvea1wvnk_UZCzxNMeFvPK83gv530,4432
- ominfra/supervisor/dispatchers.py,sha256=Xoor4MrYSV4sKUsXKe8RLWsz2tSc2v4T2OWPw2qXizc,10390
- ominfra/supervisor/events.py,sha256=OGGCuf1RWobFPDecksHJO3I4GmzBPgFvPce4-DOaZ3s,7729
+ ominfra/supervisor/dispatchers.py,sha256=sJ61yTo9EEbxHwe2NbzOTAFFFCuuyIhYli_xJioQBoo,10423
+ ominfra/supervisor/events.py,sha256=IhdL7Fj-hEvTvZ5WF6aIa2YjSPQhuUoasoJSMmRLQkU,7650
  ominfra/supervisor/exceptions.py,sha256=Qbu211H3CLlSmi9LsSikOwrcL5HgJP9ugvcKWlGTAoI,750
- ominfra/supervisor/main.py,sha256=0bj_9AzIfDlB1BB8zcX9npQIknamN7FGoVEYgLMLuP0,1701
+ ominfra/supervisor/main.py,sha256=v3Ezr7ECzqYX33HmhWbZrh78-h-1OnnxqPqmjS4zkFw,4000
  ominfra/supervisor/poller.py,sha256=VCBxLItfA4Vj69jet2XFbFScPbmdD9JA1evaofk_AnY,7709
- ominfra/supervisor/process.py,sha256=6Ut2QFRnCS4NP6xje-KiR519gNxGA6uNzoyiZjhrOM8,31373
+ ominfra/supervisor/process.py,sha256=oEd58g6KcLNWhwaZBu8wZJjb6Vd0t1sKPMVE4BjbBSI,32270
  ominfra/supervisor/states.py,sha256=JMxXYTZhJkMNQZ2tTV6wId7wrvnWgiZteskACprKskM,1374
- ominfra/supervisor/supervisor.py,sha256=CfYK3fgwNA-Z5g7ZeWxf1yg6ubVCe0rl_AXELu85o34,12198
- ominfra/supervisor/types.py,sha256=ec62QG0CDJc0XNxCnf3lXxhsxrr4CCScLPI-1SpQjlc,1141
+ ominfra/supervisor/supervisor.py,sha256=7T7RAtGc9qsb0s9fBiimReVyaWknl1e3rGU9nNxjll8,13002
+ ominfra/supervisor/types.py,sha256=GhVixieeJ00fWclxPLM2oMugCe9wEjW44wJzQ2AO0V0,1264
  ominfra/tailscale/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/tailscale/api.py,sha256=C5-t_b6jZXUWcy5k8bXm7CFnk73pSdrlMOgGDeGVrpw,1370
  ominfra/tailscale/cli.py,sha256=DSGp4hn5xwOW-l_u_InKlSF6kIobxtUtVssf_73STs0,3567
  ominfra/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  ominfra/tools/listresources.py,sha256=4qVg5txsb10EHhvqXXeM6gJ2jx9LbroEnPydDv1uXs0,6176
- ominfra-0.0.0.dev119.dist-info/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
- ominfra-0.0.0.dev119.dist-info/METADATA,sha256=XLOYfTWCuS2Ny4MBd0vDgy6hmHZw1WupGdpxfKBrWVQ,742
- ominfra-0.0.0.dev119.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
- ominfra-0.0.0.dev119.dist-info/entry_points.txt,sha256=kgecQ2MgGrM9qK744BoKS3tMesaC3yjLnl9pa5CRczg,37
- ominfra-0.0.0.dev119.dist-info/top_level.txt,sha256=E-b2OHkk_AOBLXHYZQ2EOFKl-_6uOGd8EjeG-Zy6h_w,8
- ominfra-0.0.0.dev119.dist-info/RECORD,,
+ ominfra-0.0.0.dev121.dist-info/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
+ ominfra-0.0.0.dev121.dist-info/METADATA,sha256=SBtjNf2M-A5Y1F8QRg2785aRubDRD_ff15vrWTLA90E,742
+ ominfra-0.0.0.dev121.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ ominfra-0.0.0.dev121.dist-info/entry_points.txt,sha256=kgecQ2MgGrM9qK744BoKS3tMesaC3yjLnl9pa5CRczg,37
+ ominfra-0.0.0.dev121.dist-info/top_level.txt,sha256=E-b2OHkk_AOBLXHYZQ2EOFKl-_6uOGd8EjeG-Zy6h_w,8
+ ominfra-0.0.0.dev121.dist-info/RECORD,,