ominfra 0.0.0.dev156__py3-none-any.whl → 0.0.0.dev157__py3-none-any.whl
- ominfra/manage/main.py +33 -4
- ominfra/manage/remote/spawning.py +1 -6
- ominfra/scripts/journald2aws.py +3 -2
- ominfra/scripts/manage.py +946 -11
- ominfra/scripts/supervisor.py +44 -31
- ominfra/supervisor/configs.py +2 -0
- ominfra/supervisor/supervisor.py +2 -33
- ominfra/supervisor/utils/os.py +41 -0
- {ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/RECORD +14 -14
- {ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/top_level.txt +0 -0
ominfra/scripts/supervisor.py
CHANGED
@@ -1697,8 +1697,8 @@ class _CachedNullary(_AbstractCachedNullary):
         return self._value
 
 
-def cached_nullary(fn):
-    return _CachedNullary(fn)
+def cached_nullary(fn: CallableT) -> CallableT:
+    return _CachedNullary(fn)  # type: ignore
 
 
 def static_init(fn: CallableT) -> CallableT:
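For reference, `cached_nullary` memoizes the result of a zero-argument callable; the change above only tightens its typing so decorated functions keep their signature for type checkers. A minimal self-contained sketch of the same shape (the stand-in body is illustrative, not the library's implementation):

import typing as ta

CallableT = ta.TypeVar('CallableT', bound=ta.Callable)


def cached_nullary(fn: CallableT) -> CallableT:  # stand-in matching the diff's signature
    value = None
    called = False

    def inner():
        nonlocal value, called
        if not called:
            value, called = fn(), True
        return value

    return ta.cast(CallableT, inner)


@cached_nullary
def expensive_setup() -> float:
    import time
    return time.monotonic()  # stand-in for real work


assert expensive_setup() == expensive_setup()  # second call returns the cached value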
@@ -2858,6 +2858,44 @@ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
     return Rc(-1), msg
 
 
+##
+
+
+class WaitedPid(ta.NamedTuple):
+    pid: Pid
+    sts: Rc
+
+
+def waitpid(
+        *,
+        log: ta.Optional[logging.Logger] = None,
+) -> ta.Optional[WaitedPid]:
+    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
+    # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
+    # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
+    # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
+    # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
+    # lying around.
+    try:
+        pid, sts = os.waitpid(-1, os.WNOHANG)
+
+    except OSError as exc:
+        code = exc.args[0]
+
+        if code not in (errno.ECHILD, errno.EINTR):
+            if log is not None:
+                log.critical('waitpid error %r; a process may not be cleaned up properly', code)
+
+        if code == errno.EINTR:
+            if log is not None:
+                log.debug('EINTR during reap')
+
+        return None
+
+    else:
+        return WaitedPid(pid, sts)  # type: ignore
+
+
 ########################################
 # ../utils/users.py
 
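Taken together, `waitpid` is a single non-blocking reap: `os.WNOHANG` makes `os.waitpid` return `(0, 0)` when no child has exited, `ECHILD`/`EINTR` are swallowed (and optionally logged), and any other error is logged as critical. A hedged sketch of the draining loop a caller might run, mirroring the supervisor's depth-limited reap shown further down (the `reap_all` name and the info log are illustrative; `waitpid` and `decode_wait_status` are as defined in the diff):

import logging


def reap_all(log: logging.Logger, depth: int = 0) -> None:
    # Guard against unbounded recursion, as the supervisor does.
    if depth >= 100:
        return

    wp = waitpid(log=log)
    if wp is None or not wp.pid:
        return  # waitpid error, or no exited children pending

    rc, msg = decode_wait_status(wp.sts)
    log.info('reaped pid %d: %s', wp.pid, msg)  # illustrative

    reap_all(log, depth + 1)  # keep draining until WNOHANG reports nothing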
@@ -4971,6 +5009,7 @@ TODO:
  - pickle stdlib objs? have to pin to 3.8 pickle protocol, will be cross-version
  - namedtuple
  - literals
+ - newtypes?
 """
 
 
@@ -6049,6 +6088,8 @@ class ServerConfig:
 
     groups: ta.Optional[ta.Sequence[ProcessGroupConfig]] = None
 
+    group_config_dirs: ta.Optional[ta.Sequence[str]] = None
+
     @classmethod
     def new(
             cls,
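The new `group_config_dirs` field sits alongside `groups` on `ServerConfig`; judging by its name and type it holds directories to scan for per-group config files, though the loading logic is not part of this diff. A hedged construction sketch (field names are from the diff; the path value is illustrative, and it assumes the remaining fields are likewise defaulted):

config = ServerConfig(
    groups=[],  # inline process groups, as before
    group_config_dirs=['/etc/supervisor/groups.d'],  # assumed: dirs scanned for group configs
)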
@@ -9054,7 +9095,7 @@ class Supervisor:
         if depth >= 100:
             return
 
-        wp = waitpid()
+        wp = waitpid(log=log)
 
         if wp is None or not wp.pid:
             return
@@ -9095,34 +9136,6 @@ class Supervisor:
                 self._event_callbacks.notify(event(this_tick, self))
 
 
-##
-
-
-class WaitedPid(ta.NamedTuple):
-    pid: Pid
-    sts: Rc
-
-
-def waitpid() -> ta.Optional[WaitedPid]:
-    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
-    # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
-    # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
-    # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
-    # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
-    # lying around.
-    try:
-        pid, sts = os.waitpid(-1, os.WNOHANG)
-    except OSError as exc:
-        code = exc.args[0]
-        if code not in (errno.ECHILD, errno.EINTR):
-            log.critical('waitpid error %r; a process may not be cleaned up properly', code)
-        if code == errno.EINTR:
-            log.debug('EINTR during reap')
-        return None
-    else:
-        return WaitedPid(pid, sts)  # type: ignore
-
-
 ########################################
 # ../inject.py
 
ominfra/supervisor/configs.py
CHANGED
ominfra/supervisor/supervisor.py
CHANGED
@@ -1,6 +1,4 @@
 # ruff: noqa: UP006 UP007
-import errno
-import os
 import time
 import typing as ta
 
@@ -26,8 +24,7 @@ from .types import ExitNow
 from .types import Process
 from .types import SupervisorStateManager
 from .utils.os import decode_wait_status
-from .utils.ostypes import Pid
-from .utils.ostypes import Rc
+from .utils.os import waitpid
 
 
 ##
@@ -232,7 +229,7 @@ class Supervisor:
         if depth >= 100:
             return
 
-        wp = waitpid()
+        wp = waitpid(log=log)
 
         if wp is None or not wp.pid:
             return
@@ -271,31 +268,3 @@ class Supervisor:
             if this_tick != last_tick:
                 self._ticks[period] = this_tick
                 self._event_callbacks.notify(event(this_tick, self))
-
-
-##
-
-
-class WaitedPid(ta.NamedTuple):
-    pid: Pid
-    sts: Rc
-
-
-def waitpid() -> ta.Optional[WaitedPid]:
-    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
-    # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
-    # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
-    # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
-    # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
-    # lying around.
-    try:
-        pid, sts = os.waitpid(-1, os.WNOHANG)
-    except OSError as exc:
-        code = exc.args[0]
-        if code not in (errno.ECHILD, errno.EINTR):
-            log.critical('waitpid error %r; a process may not be cleaned up properly', code)
-        if code == errno.EINTR:
-            log.debug('EINTR during reap')
-        return None
-    else:
-        return WaitedPid(pid, sts)  # type: ignore
ominfra/supervisor/utils/os.py
CHANGED
@@ -1,7 +1,10 @@
 # ruff: noqa: UP006 UP007
+import errno
+import logging
 import os
 import typing as ta
 
+from .ostypes import Pid
 from .ostypes import Rc
 from .signals import sig_name
 
@@ -43,3 +46,41 @@ def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
     else:
         msg = 'unknown termination cause 0x%04x' % sts  # noqa
     return Rc(-1), msg
+
+
+##
+
+
+class WaitedPid(ta.NamedTuple):
+    pid: Pid
+    sts: Rc
+
+
+def waitpid(
+        *,
+        log: ta.Optional[logging.Logger] = None,
+) -> ta.Optional[WaitedPid]:
+    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
+    # still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
+    # waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
+    # normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
+    # call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
+    # lying around.
+    try:
+        pid, sts = os.waitpid(-1, os.WNOHANG)
+
+    except OSError as exc:
+        code = exc.args[0]
+
+        if code not in (errno.ECHILD, errno.EINTR):
+            if log is not None:
+                log.critical('waitpid error %r; a process may not be cleaned up properly', code)
+
+        if code == errno.EINTR:
+            if log is not None:
+                log.debug('EINTR during reap')
+
+        return None
+
+    else:
+        return WaitedPid(pid, sts)  # type: ignore
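The move from supervisor.py into utils/os.py is why the `log` parameter exists: the old copy referenced the supervisor module's own `log` directly, which utils/os.py does not have, so the logger is now injected and every log call is guarded. A hedged sketch of a caller wiring its own logger in (assumes the `waitpid` and `decode_wait_status` definitions above):

import logging

log = logging.getLogger(__name__)

wp = waitpid(log=log)  # non-blocking; None on ECHILD/EINTR or other waitpid errors
if wp is not None and wp.pid:
    rc, msg = decode_wait_status(wp.sts)
    log.info('reaped pid %d: %s', wp.pid, msg)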
{ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ominfra
-Version: 0.0.0.dev156
+Version: 0.0.0.dev157
 Summary: ominfra
 Author: wrmsr
 License: BSD-3-Clause
@@ -12,8 +12,8 @@ Classifier: Operating System :: OS Independent
 Classifier: Operating System :: POSIX
 Requires-Python: >=3.12
 License-File: LICENSE
-Requires-Dist: omdev==0.0.0.dev156
-Requires-Dist: omlish==0.0.0.dev156
+Requires-Dist: omdev==0.0.0.dev157
+Requires-Dist: omlish==0.0.0.dev157
 Provides-Extra: all
 Requires-Dist: paramiko~=3.5; extra == "all"
 Requires-Dist: asyncssh~=2.18; extra == "all"
{ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/RECORD
CHANGED
@@ -33,7 +33,7 @@ ominfra/manage/bootstrap.py,sha256=1RIRhVkUZjxZcZerHMg8U6xgWhhemGgPN5cDye8dQ68,4
 ominfra/manage/bootstrap_.py,sha256=CYO8HfuVsw02PiVMMnyabL5jqJhexeP0b5mWz8eOtoA,652
 ominfra/manage/config.py,sha256=1y2N_8nXHBZc6YbW6BaRZoDDCTBmiHuWtTOQ7zdr5VE,184
 ominfra/manage/inject.py,sha256=_FVaMZUBKi-oObv14H77luWYCodxNJJD1t4pNQzckFE,2030
-ominfra/manage/main.py,sha256=
+ominfra/manage/main.py,sha256=xc8iTMNCvdS_leKA-5vodEj_Dc-V8pThpPxcewrROns,4287
 ominfra/manage/marshal.py,sha256=WKj7IU9bo4fBMSSzT6ZMm_WFalXIJZ-V7j8oi92fNhk,305
 ominfra/manage/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/manage/commands/base.py,sha256=LtaI0AgnplttQK7gNncNItq8yuTZQimJTaprVpZktI8,3993
@@ -61,7 +61,7 @@ ominfra/manage/remote/connection.py,sha256=T4fL-GXXAfnbCbkZ3_28t8LAwwki4Td3j41eY
 ominfra/manage/remote/execution.py,sha256=_bygZi_0Uel615uIg43S14CTdmv1unEIu9TPz2mVRJ4,11738
 ominfra/manage/remote/inject.py,sha256=nSNP_VInCCZOWVrUIRHBvLmnM45geFoYmMh-zqc__as,1080
 ominfra/manage/remote/payload.py,sha256=Rn-Yo26POpHEOOfUHX3jWkqcQVEAvkJ_5Bu13jwoob4,944
-ominfra/manage/remote/spawning.py,sha256=
+ominfra/manage/remote/spawning.py,sha256=W47kS07MKPYTqh6SgiPd1rRq8rPDjgr5ZinSnrWTuJs,3035
 ominfra/manage/system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/manage/system/commands.py,sha256=XrYvsxiwTJh17buIWmoFGH8zTUIXmrXvYkLy1INtmkU,1173
 ominfra/manage/system/config.py,sha256=mEVBL1cy4twO6F0bdnCI01Sm0xuLe1Z5eiAzCvbmoAc,196
@@ -73,13 +73,13 @@ ominfra/manage/targets/connection.py,sha256=j2QrVS-QFOZJ47TqwaMt8MSPg0whokysGePa
 ominfra/manage/targets/inject.py,sha256=P4597xWM-V3I_gCt2O71OLhYQkkXtuJvkYRsIbhhMcE,1561
 ominfra/manage/targets/targets.py,sha256=CFl8Uirgn3gfowO1Fn-LBK-6qYqEMFJ9snPUl0gCRuM,1753
 ominfra/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ominfra/scripts/journald2aws.py,sha256=
-ominfra/scripts/manage.py,sha256=
-ominfra/scripts/supervisor.py,sha256=
+ominfra/scripts/journald2aws.py,sha256=nR30v1coSLvVdgTgZDmw-oyTAtACsWUFIReLIoSmw60,151700
+ominfra/scripts/manage.py,sha256=djPW77QZ9tiNLYpYuzxshWEVsV7ArRTfDHc_ccujH-w,282516
+ominfra/scripts/supervisor.py,sha256=VvL_E4U_gw1YtwwjJAJHFhz7f8UJ3n4dnJDxHl0cxqY,272610
 ominfra/supervisor/LICENSE.txt,sha256=yvqaMNsDhWxziHa9ien6qCW1SkZv-DQlAg96XjfSee8,1746
 ominfra/supervisor/__init__.py,sha256=Y3l4WY4JRi2uLG6kgbGp93fuGfkxkKwZDvhsa0Rwgtk,15
 ominfra/supervisor/__main__.py,sha256=I0yFw-C08OOiZ3BF6lF1Oiv789EQXu-_j6whDhQUTEA,66
-ominfra/supervisor/configs.py,sha256=
+ominfra/supervisor/configs.py,sha256=InaLW0T93dbAuzKyc6H8wGIFR2oM1GROSiQX9hZY_ko,13711
 ominfra/supervisor/dispatchers.py,sha256=zXLwQS4Vc6dWw5o9QOL04UMDt7w6CKu9wf19CjUiS2Q,1005
 ominfra/supervisor/dispatchersimpl.py,sha256=q3dEyOHWTPKm28nmAGisjgIW1BX6O3-SzbYa7nWuTEs,11349
 ominfra/supervisor/events.py,sha256=XGrtzHr1xm0dwjz329fn9eR0_Ap-LQL6Sk8LJ8eVDEo,6692
@@ -100,14 +100,14 @@ ominfra/supervisor/signals.py,sha256=jY52naUifcAjd6nICTP1ZW3IQSPsHB4cvbsJo8_QV_U
 ominfra/supervisor/spawning.py,sha256=i1k3tmqWyU-KIN7kel-JVxTVGnLiTIVmZzlstJSZpjM,622
 ominfra/supervisor/spawningimpl.py,sha256=rWlL-yumu5I1QGhN08XWHVi5HlNPMPkt08D4jEMb_wU,11104
 ominfra/supervisor/states.py,sha256=x1trJQbItkSegOmotpn5YNcZMLbBL8I3GmhvFCQl4Oo,1400
-ominfra/supervisor/supervisor.py,sha256=
+ominfra/supervisor/supervisor.py,sha256=XsQrWjMsv0o0CzI5nZoGybeUnSDouc8X7jSLr920_eQ,8169
 ominfra/supervisor/types.py,sha256=g6C-NuPWMy7pk-kcnT3mO7auLWElZU_4dhzrUaX--TA,4015
 ominfra/supervisor/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/supervisor/utils/collections.py,sha256=vcfmVYS4QngMdtEI1DvdRIcubmy55Wj40NCzW27_rIY,1361
 ominfra/supervisor/utils/diag.py,sha256=ujz4gkW7p3wmbaKFM8Hz5eHEwpoUkbB8JeDvcHilCz0,705
 ominfra/supervisor/utils/fds.py,sha256=lz8DWXzGYvu93dqhWK0WrhXrrJVQ_psoom4Nj_o8g2g,849
 ominfra/supervisor/utils/fs.py,sha256=ABbNcsCpzSXAvq_ZZSCj61mj5kGnVuC4spUmoWenlqw,1155
-ominfra/supervisor/utils/os.py,sha256=
+ominfra/supervisor/utils/os.py,sha256=S-y50uhwUhYklIkYRXTHiejnWj_wtofplaOvFqwS0iM,2399
 ominfra/supervisor/utils/ostypes.py,sha256=B7VjwbzVesz9we9MztoSk8bH8sTxMIWtILy_Qde0G7w,164
 ominfra/supervisor/utils/signals.py,sha256=uZkTvissbtq7TlJD4MkTiL3F-zyWmAFUuWQtFjsf0MI,1474
 ominfra/supervisor/utils/strings.py,sha256=gZOYiFI3ZQEMrXq6VlK2WadK12JPO6zYjPenq_OPcYU,2475
@@ -117,9 +117,9 @@ ominfra/tailscale/api.py,sha256=C5-t_b6jZXUWcy5k8bXm7CFnk73pSdrlMOgGDeGVrpw,1370
 ominfra/tailscale/cli.py,sha256=h6akQJMl0KuWLHS7Ur6WcBZ2JwF0DJQhsPTnFBdGyNk,3571
 ominfra/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 ominfra/tools/listresources.py,sha256=4qVg5txsb10EHhvqXXeM6gJ2jx9LbroEnPydDv1uXs0,6176
-ominfra-0.0.0.dev156.dist-info/LICENSE,sha256=
-ominfra-0.0.0.dev156.dist-info/METADATA,sha256=
-ominfra-0.0.0.dev156.dist-info/WHEEL,sha256=
-ominfra-0.0.0.dev156.dist-info/entry_points.txt,sha256=
-ominfra-0.0.0.dev156.dist-info/top_level.txt,sha256=
-ominfra-0.0.0.dev156.dist-info/RECORD,,
+ominfra-0.0.0.dev157.dist-info/LICENSE,sha256=B_hVtavaA8zCYDW99DYdcpDLKz1n3BBRjZrcbv8uG8c,1451
+ominfra-0.0.0.dev157.dist-info/METADATA,sha256=5kfpmRratw9taQ6vywr5YKkCkOrSxqcXwrYvX0pieKU,731
+ominfra-0.0.0.dev157.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ominfra-0.0.0.dev157.dist-info/entry_points.txt,sha256=kgecQ2MgGrM9qK744BoKS3tMesaC3yjLnl9pa5CRczg,37
+ominfra-0.0.0.dev157.dist-info/top_level.txt,sha256=E-b2OHkk_AOBLXHYZQ2EOFKl-_6uOGd8EjeG-Zy6h_w,8
+ominfra-0.0.0.dev157.dist-info/RECORD,,
{ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/LICENSE
File without changes
{ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/WHEEL
File without changes
{ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/entry_points.txt
File without changes
{ominfra-0.0.0.dev156.dist-info → ominfra-0.0.0.dev157.dist-info}/top_level.txt
File without changes