ominfra 0.0.0.dev127__py3-none-any.whl → 0.0.0.dev129__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ominfra/deploy/_executor.py +24 -0
- ominfra/pyremote/_runcommands.py +24 -0
- ominfra/scripts/journald2aws.py +24 -0
- ominfra/scripts/supervisor.py +1320 -1225
- ominfra/supervisor/configs.py +34 -11
- ominfra/supervisor/dispatchers.py +7 -6
- ominfra/supervisor/dispatchersimpl.py +29 -22
- ominfra/supervisor/groups.py +1 -1
- ominfra/supervisor/groupsimpl.py +2 -2
- ominfra/supervisor/inject.py +22 -17
- ominfra/supervisor/io.py +82 -0
- ominfra/supervisor/main.py +6 -7
- ominfra/supervisor/pipes.py +15 -13
- ominfra/supervisor/poller.py +36 -35
- ominfra/supervisor/{processes.py → process.py} +2 -1
- ominfra/supervisor/{processesimpl.py → processimpl.py} +42 -54
- ominfra/supervisor/setup.py +1 -1
- ominfra/supervisor/setupimpl.py +4 -3
- ominfra/supervisor/signals.py +56 -50
- ominfra/supervisor/spawning.py +2 -1
- ominfra/supervisor/spawningimpl.py +24 -21
- ominfra/supervisor/supervisor.py +72 -134
- ominfra/supervisor/types.py +45 -34
- ominfra/supervisor/utils/__init__.py +0 -0
- ominfra/supervisor/utils/diag.py +31 -0
- ominfra/supervisor/utils/fds.py +46 -0
- ominfra/supervisor/utils/fs.py +47 -0
- ominfra/supervisor/utils/os.py +45 -0
- ominfra/supervisor/utils/ostypes.py +9 -0
- ominfra/supervisor/utils/signals.py +60 -0
- ominfra/supervisor/utils/strings.py +105 -0
- ominfra/supervisor/{users.py → utils/users.py} +11 -8
- {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev129.dist-info}/METADATA +3 -3
- {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev129.dist-info}/RECORD +39 -33
- ominfra/supervisor/context.py +0 -84
- ominfra/supervisor/datatypes.py +0 -113
- ominfra/supervisor/utils.py +0 -206
- /ominfra/supervisor/{collections.py → utils/collections.py} +0 -0
- {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev129.dist-info}/LICENSE +0 -0
- {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev129.dist-info}/WHEEL +0 -0
- {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev129.dist-info}/entry_points.txt +0 -0
- {ominfra-0.0.0.dev127.dist-info → ominfra-0.0.0.dev129.dist-info}/top_level.txt +0 -0
ominfra/scripts/supervisor.py
CHANGED
@@ -95,7 +95,7 @@ TomlParseFloat = ta.Callable[[str], ta.Any]
 TomlKey = ta.Tuple[str, ...]
 TomlPos = int  # ta.TypeAlias

-# ../collections.py
+# ../utils/collections.py
 K = ta.TypeVar('K')
 V = ta.TypeVar('V')

@@ -952,168 +952,6 @@ def toml_make_safe_parse_float(parse_float: TomlParseFloat) -> TomlParseFloat:
     return safe_parse_float


-########################################
-# ../collections.py
-
-
-class KeyedCollectionAccessors(abc.ABC, ta.Generic[K, V]):
-    @property
-    @abc.abstractmethod
-    def _by_key(self) -> ta.Mapping[K, V]:
-        raise NotImplementedError
-
-    def __iter__(self) -> ta.Iterator[V]:
-        return iter(self._by_key.values())
-
-    def __len__(self) -> int:
-        return len(self._by_key)
-
-    def __contains__(self, key: K) -> bool:
-        return key in self._by_key
-
-    def __getitem__(self, key: K) -> V:
-        return self._by_key[key]
-
-    def get(self, key: K, default: ta.Optional[V] = None) -> ta.Optional[V]:
-        return self._by_key.get(key, default)
-
-    def items(self) -> ta.Iterator[ta.Tuple[K, V]]:
-        return iter(self._by_key.items())
-
-
-class KeyedCollection(KeyedCollectionAccessors[K, V]):
-    def __init__(self, items: ta.Iterable[V]) -> None:
-        super().__init__()
-
-        by_key: ta.Dict[K, V] = {}
-        for v in items:
-            if (k := self._key(v)) in by_key:
-                raise KeyError(f'key {k} of {v} already registered by {by_key[k]}')
-            by_key[k] = v
-        self.__by_key = by_key
-
-    @property
-    def _by_key(self) -> ta.Mapping[K, V]:
-        return self.__by_key
-
-    @abc.abstractmethod
-    def _key(self, v: V) -> K:
-        raise NotImplementedError
-
-
-########################################
-# ../datatypes.py
-
-
-class Automatic:
-    pass
-
-
-class Syslog:
-    """TODO deprecated; remove this special 'syslog' filename in the future"""
-
-
-LOGFILE_NONES = ('none', 'off', None)
-LOGFILE_AUTOS = (Automatic, 'auto')
-LOGFILE_SYSLOGS = (Syslog, 'syslog')
-
-
-def logfile_name(val):
-    if hasattr(val, 'lower'):
-        coerced = val.lower()
-    else:
-        coerced = val
-
-    if coerced in LOGFILE_NONES:
-        return None
-    elif coerced in LOGFILE_AUTOS:
-        return Automatic
-    elif coerced in LOGFILE_SYSLOGS:
-        return Syslog
-    else:
-        return existing_dirpath(val)
-
-
-##
-
-
-def octal_type(arg: ta.Union[str, int]) -> int:
-    if isinstance(arg, int):
-        return arg
-    try:
-        return int(arg, 8)
-    except (TypeError, ValueError):
-        raise ValueError(f'{arg} can not be converted to an octal type')  # noqa
-
-
-def existing_directory(v: str) -> str:
-    nv = os.path.expanduser(v)
-    if os.path.isdir(nv):
-        return nv
-    raise ValueError(f'{v} is not an existing directory')
-
-
-def existing_dirpath(v: str) -> str:
-    nv = os.path.expanduser(v)
-    dir = os.path.dirname(nv)  # noqa
-    if not dir:
-        # relative pathname with no directory component
-        return nv
-    if os.path.isdir(dir):
-        return nv
-    raise ValueError(f'The directory named as part of the path {v} does not exist')
-
-
-def logging_level(value: ta.Union[str, int]) -> int:
-    if isinstance(value, int):
-        return value
-    s = str(value).lower()
-    level = logging.getLevelNamesMapping().get(s.upper())
-    if level is None:
-        raise ValueError(f'bad logging level name {value!r}')
-    return level
-
-
-class SuffixMultiplier:
-    # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches
-    # are case insensitive. Return values are in the fundamental unit.
-    def __init__(self, d, default=1):
-        super().__init__()
-        self._d = d
-        self._default = default
-        # all keys must be the same size
-        self._keysz = None
-        for k in d:
-            if self._keysz is None:
-                self._keysz = len(k)
-            elif self._keysz != len(k):  # type: ignore
-                raise ValueError(k)
-
-    def __call__(self, v: ta.Union[str, int]) -> int:
-        if isinstance(v, int):
-            return v
-        v = v.lower()
-        for s, m in self._d.items():
-            if v[-self._keysz:] == s:  # type: ignore
-                return int(v[:-self._keysz]) * m  # type: ignore
-        return int(v) * self._default
-
-
-byte_size = SuffixMultiplier({
-    'kb': 1024,
-    'mb': 1024 * 1024,
-    'gb': 1024 * 1024 * 1024,
-})
-
-
-class RestartWhenExitUnexpected:
-    pass
-
-
-class RestartUnconditionally:
-    pass
-
-
 ########################################
 # ../exceptions.py

@@ -1208,67 +1046,6 @@ def drop_privileges(user: ta.Union[int, str, None]) -> ta.Optional[str]:
     return None


-########################################
-# ../signals.py
-
-
-##
-
-
-_SIGS_BY_NUM: ta.Mapping[int, signal.Signals] = {s.value: s for s in signal.Signals}
-_SIGS_BY_NAME: ta.Mapping[str, signal.Signals] = {s.name: s for s in signal.Signals}
-
-
-def sig_num(value: ta.Union[int, str]) -> int:
-    try:
-        num = int(value)
-
-    except (ValueError, TypeError):
-        name = value.strip().upper()  # type: ignore
-        if not name.startswith('SIG'):
-            name = f'SIG{name}'
-
-        if (sn := _SIGS_BY_NAME.get(name)) is None:
-            raise ValueError(f'value {value!r} is not a valid signal name')  # noqa
-        num = sn
-
-    if num not in _SIGS_BY_NUM:
-        raise ValueError(f'value {value!r} is not a valid signal number')
-
-    return num
-
-
-def sig_name(num: int) -> str:
-    if (sig := _SIGS_BY_NUM.get(num)) is not None:
-        return sig.name
-    return f'signal {sig}'
-
-
-##
-
-
-class SignalReceiver:
-    def __init__(self) -> None:
-        super().__init__()
-
-        self._signals_recvd: ta.List[int] = []
-
-    def receive(self, sig: int, frame: ta.Any = None) -> None:
-        if sig not in self._signals_recvd:
-            self._signals_recvd.append(sig)
-
-    def install(self, *sigs: int) -> None:
-        for sig in sigs:
-            signal.signal(sig, self.receive)
-
-    def get_signal(self) -> ta.Optional[int]:
-        if self._signals_recvd:
-            sig = self._signals_recvd.pop(0)
-        else:
-            sig = None
-        return sig
-
-
 ########################################
 # ../states.py

@@ -1330,67 +1107,310 @@ class SupervisorState(enum.IntEnum):


 ########################################
-# ../
+# ../utils/collections.py


-
+class KeyedCollectionAccessors(abc.ABC, ta.Generic[K, V]):
+    @property
+    @abc.abstractmethod
+    def _by_key(self) -> ta.Mapping[K, V]:
+        raise NotImplementedError

+    def __iter__(self) -> ta.Iterator[V]:
+        return iter(self._by_key.values())

-def
-
-        uid = int(name)
-    except ValueError:
-        try:
-            pwdrec = pwd.getpwnam(name)
-        except KeyError:
-            raise ValueError(f'Invalid user name {name}')  # noqa
-        uid = pwdrec[2]
-    else:
-        try:
-            pwd.getpwuid(uid)  # check if uid is valid
-        except KeyError:
-            raise ValueError(f'Invalid user id {name}')  # noqa
-    return uid
+    def __len__(self) -> int:
+        return len(self._by_key)

+    def __contains__(self, key: K) -> bool:
+        return key in self._by_key

-def
-
-        gid = int(name)
-    except ValueError:
-        try:
-            grprec = grp.getgrnam(name)
-        except KeyError:
-            raise ValueError(f'Invalid group name {name}')  # noqa
-        gid = grprec[2]
-    else:
-        try:
-            grp.getgrgid(gid)  # check if gid is valid
-        except KeyError:
-            raise ValueError(f'Invalid group id {name}')  # noqa
-    return gid
+    def __getitem__(self, key: K) -> V:
+        return self._by_key[key]

+    def get(self, key: K, default: ta.Optional[V] = None) -> ta.Optional[V]:
+        return self._by_key.get(key, default)

-def
-
-    return pwrec[3]
+    def items(self) -> ta.Iterator[ta.Tuple[K, V]]:
+        return iter(self._by_key.items())


-
+class KeyedCollection(KeyedCollectionAccessors[K, V]):
+    def __init__(self, items: ta.Iterable[V]) -> None:
+        super().__init__()

+        by_key: ta.Dict[K, V] = {}
+        for v in items:
+            if (k := self._key(v)) in by_key:
+                raise KeyError(f'key {k} of {v} already registered by {by_key[k]}')
+            by_key[k] = v
+        self.__by_key = by_key

-@
-
-
-    uid: int
-    gid: int
+    @property
+    def _by_key(self) -> ta.Mapping[K, V]:
+        return self.__by_key

+    @abc.abstractmethod
+    def _key(self, v: V) -> K:
+        raise NotImplementedError
+
+
+########################################
+# ../utils/diag.py
+
+
+def compact_traceback() -> ta.Tuple[
+        ta.Tuple[str, str, int],
+        ta.Type[BaseException],
+        BaseException,
+        types.TracebackType,
+]:
+    t, v, tb = sys.exc_info()
+    if not tb:
+        raise RuntimeError('No traceback')
+
+    tbinfo = []
+    while tb:
+        tbinfo.append((
+            tb.tb_frame.f_code.co_filename,
+            tb.tb_frame.f_code.co_name,
+            str(tb.tb_lineno),
+        ))
+        tb = tb.tb_next
+
+    # just to be safe
+    del tb
+
+    file, function, line = tbinfo[-1]
+    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])  # noqa
+    return (file, function, line), t, v, info  # type: ignore
+
+
+########################################
+# ../utils/fs.py
+
+
+def try_unlink(path: str) -> bool:
+    try:
+        os.unlink(path)
+    except OSError:
+        return False
+    return True
+
+
+def mktempfile(suffix: str, prefix: str, dir: str) -> str:  # noqa
+    fd, filename = tempfile.mkstemp(suffix, prefix, dir)
+    os.close(fd)
+    return filename
+
+
+def get_path() -> ta.Sequence[str]:
+    """Return a list corresponding to $PATH, or a default."""
+
+    path = ['/bin', '/usr/bin', '/usr/local/bin']
+    if 'PATH' in os.environ:
+        p = os.environ['PATH']
+        if p:
+            path = p.split(os.pathsep)
+    return path
+
+
+def check_existing_dir(v: str) -> str:
+    nv = os.path.expanduser(v)
+    if os.path.isdir(nv):
+        return nv
+    raise ValueError(f'{v} is not an existing directory')
+
+
+def check_path_with_existing_dir(v: str) -> str:
+    nv = os.path.expanduser(v)
+    dir = os.path.dirname(nv)  # noqa
+    if not dir:
+        # relative pathname with no directory component
+        return nv
+    if os.path.isdir(dir):
+        return nv
+    raise ValueError(f'The directory named as part of the path {v} does not exist')
+
+
+########################################
+# ../utils/ostypes.py
+
+
+Fd = ta.NewType('Fd', int)
+Pid = ta.NewType('Pid', int)
+Rc = ta.NewType('Rc', int)
+
+Uid = ta.NewType('Uid', int)
+Gid = ta.NewType('Gid', int)
+
+
+########################################
+# ../utils/signals.py
+
+
+##
+
+
+_SIGS_BY_NUM: ta.Mapping[int, signal.Signals] = {s.value: s for s in signal.Signals}
+_SIGS_BY_NAME: ta.Mapping[str, signal.Signals] = {s.name: s for s in signal.Signals}
+
+
+def sig_num(value: ta.Union[int, str]) -> int:
+    try:
+        num = int(value)
+
+    except (ValueError, TypeError):
+        name = value.strip().upper()  # type: ignore
+        if not name.startswith('SIG'):
+            name = f'SIG{name}'
+
+        if (sn := _SIGS_BY_NAME.get(name)) is None:
+            raise ValueError(f'value {value!r} is not a valid signal name')  # noqa
+        num = sn
+
+    if num not in _SIGS_BY_NUM:
+        raise ValueError(f'value {value!r} is not a valid signal number')
+
+    return num
+
+
+def sig_name(num: int) -> str:
+    if (sig := _SIGS_BY_NUM.get(num)) is not None:
+        return sig.name
+    return f'signal {sig}'
+
+
+##
+
+
+class SignalReceiver:
+    def __init__(self) -> None:
+        super().__init__()
+
+        self._signals_recvd: ta.List[int] = []
+
+    def receive(self, sig: int, frame: ta.Any = None) -> None:
+        if sig not in self._signals_recvd:
+            self._signals_recvd.append(sig)
+
+    def install(self, *sigs: int) -> None:
+        for sig in sigs:
+            signal.signal(sig, self.receive)
+
+    def get_signal(self) -> ta.Optional[int]:
+        if self._signals_recvd:
+            sig = self._signals_recvd.pop(0)
+        else:
+            sig = None
+        return sig
+
+
+########################################
+# ../utils/strings.py
+
+
+##
+
+
+def as_bytes(s: ta.Union[str, bytes], encoding: str = 'utf8') -> bytes:
+    if isinstance(s, bytes):
+        return s
+    else:
+        return s.encode(encoding)
+
+
+@ta.overload
+def find_prefix_at_end(haystack: str, needle: str) -> int:
+    ...
+
+
+@ta.overload
+def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
+    ...
+
+
+def find_prefix_at_end(haystack, needle):
+    l = len(needle) - 1
+    while l and not haystack.endswith(needle[:l]):
+        l -= 1
+    return l
+
+
+##
+
+
+ANSI_ESCAPE_BEGIN = b'\x1b['
+ANSI_TERMINATORS = (b'H', b'f', b'A', b'B', b'C', b'D', b'R', b's', b'u', b'J', b'K', b'h', b'l', b'p', b'm')
+
+
+def strip_escapes(s: bytes) -> bytes:
+    """Remove all ANSI color escapes from the given string."""
+
+    result = b''
+    show = 1
+    i = 0
+    l = len(s)
+    while i < l:
+        if show == 0 and s[i:i + 1] in ANSI_TERMINATORS:
+            show = 1
+        elif show:
+            n = s.find(ANSI_ESCAPE_BEGIN, i)
+            if n == -1:
+                return result + s[i:]
+            else:
+                result = result + s[i:n]
+                i = n
+                show = 0
+        i += 1
+    return result
+
+
+##
+
+
+class SuffixMultiplier:
+    # d is a dictionary of suffixes to integer multipliers. If no suffixes match, default is the multiplier. Matches
+    # are case insensitive. Return values are in the fundamental unit.
+    def __init__(self, d, default=1):
+        super().__init__()
+        self._d = d
+        self._default = default
+        # all keys must be the same size
+        self._keysz = None
+        for k in d:
+            if self._keysz is None:
+                self._keysz = len(k)
+            elif self._keysz != len(k):  # type: ignore
+                raise ValueError(k)
+
+    def __call__(self, v: ta.Union[str, int]) -> int:
+        if isinstance(v, int):
+            return v
+        v = v.lower()
+        for s, m in self._d.items():
+            if v[-self._keysz:] == s:  # type: ignore
+                return int(v[:-self._keysz]) * m  # type: ignore
+        return int(v) * self._default

-
-
-
-
-
-
+
+parse_bytes_size = SuffixMultiplier({
+    'kb': 1024,
+    'mb': 1024 * 1024,
+    'gb': 1024 * 1024 * 1024,
+})
+
+
+#
+
+
+def parse_octal(arg: ta.Union[str, int]) -> int:
+    if isinstance(arg, int):
+        return arg
+    try:
+        return int(arg, 8)
+    except (TypeError, ValueError):
+        raise ValueError(f'{arg} can not be converted to an octal type')  # noqa


 ########################################
@@ -1474,6 +1494,30 @@ def check_not_equal(l: T, r: T) -> T:
     return l


+def check_is(l: T, r: T) -> T:
+    if l is not r:
+        raise ValueError(l, r)
+    return l
+
+
+def check_is_not(l: T, r: ta.Any) -> T:
+    if l is r:
+        raise ValueError(l, r)
+    return l
+
+
+def check_in(v: T, c: ta.Container[T]) -> T:
+    if v not in c:
+        raise ValueError(v, c)
+    return v
+
+
+def check_not_in(v: T, c: ta.Container[T]) -> T:
+    if v in c:
+        raise ValueError(v, c)
+    return v
+
+
 def check_single(vs: ta.Iterable[T]) -> T:
     [v] = vs
     return v
@@ -2032,112 +2076,64 @@ def get_event_name_by_type(requested):


 ########################################
-# ../
-
-
-##
-
+# ../utils/fds.py

-SupervisorUser = ta.NewType('SupervisorUser', User)

+class PipeFds(ta.NamedTuple):
+    r: Fd
+    w: Fd

-##

+def make_pipe() -> PipeFds:
+    return PipeFds(*os.pipe())  # type: ignore

-class DaemonizeListener(abc.ABC):  # noqa
-    def before_daemonize(self) -> None:  # noqa
-        pass
-
-    def after_daemonize(self) -> None:  # noqa
-        pass

+def read_fd(fd: Fd) -> bytes:
+    try:
+        data = os.read(fd, 2 << 16)  # 128K
+    except OSError as why:
+        if why.args[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR):
+            raise
+        data = b''
+    return data

-DaemonizeListeners = ta.NewType('DaemonizeListeners', ta.Sequence[DaemonizeListener])

+def close_fd(fd: Fd) -> bool:
+    try:
+        os.close(fd)
+    except OSError:
+        return False
+    return True

-##

+def is_fd_open(fd: Fd) -> bool:
+    try:
+        n = os.dup(fd)
+    except OSError:
+        return False
+    os.close(n)
+    return True

-class SupervisorSetup(abc.ABC):
-    @abc.abstractmethod
-    def setup(self) -> None:
-        raise NotImplementedError

-
-
-        raise NotImplementedError
+def get_open_fds(limit: int) -> ta.FrozenSet[Fd]:
+    return frozenset(fd for i in range(limit) if is_fd_open(fd := Fd(i)))


 ########################################
-# ../utils.py
+# ../utils/os.py


 ##


-def
-    if isinstance(s, bytes):
-        return s
-    else:
-        return s.encode(encoding)
-
-
-def as_string(s: ta.Union[str, bytes], encoding: str = 'utf8') -> str:
-    if isinstance(s, str):
-        return s
-    else:
-        return s.decode(encoding)
-
-
-def find_prefix_at_end(haystack: bytes, needle: bytes) -> int:
-    l = len(needle) - 1
-    while l and not haystack.endswith(needle[:l]):
-        l -= 1
-    return l
-
-
-##
-
-
-def compact_traceback() -> ta.Tuple[
-        ta.Tuple[str, str, int],
-        ta.Type[BaseException],
-        BaseException,
-        types.TracebackType,
-]:
-    t, v, tb = sys.exc_info()
-    if not tb:
-        raise RuntimeError('No traceback')
-
-    tbinfo = []
-    while tb:
-        tbinfo.append((
-            tb.tb_frame.f_code.co_filename,
-            tb.tb_frame.f_code.co_name,
-            str(tb.tb_lineno),
-        ))
-        tb = tb.tb_next
-
-    # just to be safe
-    del tb
-
-    file, function, line = tbinfo[-1]
-    info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo])  # noqa
-    return (file, function, line), t, v, info  # type: ignore
-
-
-class ExitNow(Exception):  # noqa
-    pass
-
-
-def real_exit(code: int) -> None:
+def real_exit(code: Rc) -> None:
     os._exit(code)  # noqa


 ##


-def decode_wait_status(sts: int) -> ta.Tuple[
+def decode_wait_status(sts: int) -> ta.Tuple[Rc, str]:
     """
     Decode the status returned by wait() or waitpid().

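A small illustration (not from the package) of the pipe helpers added in the new utils/fds.py section above, assuming those definitions are in scope:

import os

# make_pipe() wraps os.pipe() in a PipeFds NamedTuple of (r, w) descriptors.
p = make_pipe()

os.write(p.w, b'hello')
data = read_fd(p.r)      # reads up to 128K; returns b'' on EWOULDBLOCK/EBADF/EINTR
assert data == b'hello'

close_fd(p.w)            # returns False instead of raising if the fd is already bad
close_fd(p.r)
assert not is_fd_open(p.w)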
@@ -2148,7 +2144,8 @@ def decode_wait_status(sts: int) -> ta.Tuple[int, str]:
     if os.WIFEXITED(sts):
         es = os.WEXITSTATUS(sts) & 0xffff
         msg = f'exit status {es}'
-        return es, msg
+        return Rc(es), msg
+
     elif os.WIFSIGNALED(sts):
         sig = os.WTERMSIG(sts)
         msg = f'terminated by {sig_name(sig)}'
@@ -2158,112 +2155,75 @@ def decode_wait_status(sts: int) -> ta.Tuple[int, str]:
         iscore = bool(sts & 0x80)
         if iscore:
             msg += ' (core dumped)'
-        return -1, msg
+        return Rc(-1), msg
+
     else:
         msg = 'unknown termination cause 0x%04x' % sts  # noqa
-        return -1, msg
+        return Rc(-1), msg


-
-
-
-def read_fd(fd: int) -> bytes:
-    try:
-        data = os.read(fd, 2 << 16)  # 128K
-    except OSError as why:
-        if why.args[0] not in (errno.EWOULDBLOCK, errno.EBADF, errno.EINTR):
-            raise
-        data = b''
-    return data
+########################################
+# ../utils/users.py


-
-    try:
-        os.unlink(path)
-    except OSError:
-        return False
-    return True
+##


-def
+def name_to_uid(name: str) -> Uid:
     try:
-
-    except
-
-
+        uid = int(name)
+    except ValueError:
+        try:
+            pwdrec = pwd.getpwnam(name)
+        except KeyError:
+            raise ValueError(f'Invalid user name {name}')  # noqa
+        uid = pwdrec[2]
+    else:
+        try:
+            pwd.getpwuid(uid)  # check if uid is valid
+        except KeyError:
+            raise ValueError(f'Invalid user id {name}')  # noqa
+    return Uid(uid)


-def
+def name_to_gid(name: str) -> Gid:
     try:
-
-    except
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def get_path() -> ta.Sequence[str]:
-    """Return a list corresponding to $PATH, or a default."""
-
-    path = ['/bin', '/usr/bin', '/usr/local/bin']
-    if 'PATH' in os.environ:
-        p = os.environ['PATH']
-        if p:
-            path = p.split(os.pathsep)
-    return path
-
-
-def normalize_path(v: str) -> str:
-    return os.path.normpath(os.path.abspath(os.path.expanduser(v)))
-
-
-##
-
-
-ANSI_ESCAPE_BEGIN = b'\x1b['
-ANSI_TERMINATORS = (b'H', b'f', b'A', b'B', b'C', b'D', b'R', b's', b'u', b'J', b'K', b'h', b'l', b'p', b'm')
-
-
-def strip_escapes(s: bytes) -> bytes:
-    """Remove all ANSI color escapes from the given string."""
-
-    result = b''
-    show = 1
-    i = 0
-    l = len(s)
-    while i < l:
-        if show == 0 and s[i:i + 1] in ANSI_TERMINATORS:
-            show = 1
-        elif show:
-            n = s.find(ANSI_ESCAPE_BEGIN, i)
-            if n == -1:
-                return result + s[i:]
-            else:
-                result = result + s[i:n]
-                i = n
-                show = 0
-        i += 1
-    return result
+        gid = int(name)
+    except ValueError:
+        try:
+            grprec = grp.getgrnam(name)
+        except KeyError:
+            raise ValueError(f'Invalid group name {name}')  # noqa
+        gid = grprec[2]
+    else:
+        try:
+            grp.getgrgid(gid)  # check if gid is valid
+        except KeyError:
+            raise ValueError(f'Invalid group id {name}')  # noqa
+    return Gid(gid)
+
+
+def gid_for_uid(uid: Uid) -> Gid:
+    pwrec = pwd.getpwuid(uid)
+    return Gid(pwrec[3])


 ##


-
-
+@dc.dataclass(frozen=True)
+class User:
+    name: str
+    uid: Uid
+    gid: Gid
+
+
+def get_user(name: str) -> User:
+    return User(
+        name=name,
+        uid=(uid := name_to_uid(name)),
+        gid=gid_for_uid(uid),
+    )


 ########################################
@@ -2668,6 +2628,11 @@ class HttpRequestParser:

 ########################################
 # ../../../omlish/lite/inject.py
+"""
+TODO:
+ - recursion detection
+ - bind empty array
+"""


 ###
@@ -2767,7 +2732,7 @@ class InjectorError(Exception):
     pass


-@dc.dataclass(
+@dc.dataclass()
 class InjectorKeyError(InjectorError):
     key: InjectorKey

@@ -2775,16 +2740,18 @@ class InjectorKeyError(InjectorError):
     name: ta.Optional[str] = None


-@dc.dataclass(frozen=True)
 class UnboundInjectorKeyError(InjectorKeyError):
     pass


-@dc.dataclass(frozen=True)
 class DuplicateInjectorKeyError(InjectorKeyError):
     pass


+class CyclicDependencyInjectorKeyError(InjectorKeyError):
+    pass
+
+
 ###
 # keys

@@ -3118,22 +3085,65 @@ class _Injector(Injector):
         if _INJECTOR_INJECTOR_KEY in self._pfm:
             raise DuplicateInjectorKeyError(_INJECTOR_INJECTOR_KEY)

+        self.__cur_req: ta.Optional[_Injector._Request] = None
+
+    class _Request:
+        def __init__(self, injector: '_Injector') -> None:
+            super().__init__()
+            self._injector = injector
+            self._provisions: ta.Dict[InjectorKey, Maybe] = {}
+            self._seen_keys: ta.Set[InjectorKey] = set()
+
+        def handle_key(self, key: InjectorKey) -> Maybe[Maybe]:
+            try:
+                return Maybe.just(self._provisions[key])
+            except KeyError:
+                pass
+            if key in self._seen_keys:
+                raise CyclicDependencyInjectorKeyError(key)
+            self._seen_keys.add(key)
+            return Maybe.empty()
+
+        def handle_provision(self, key: InjectorKey, mv: Maybe) -> Maybe:
+            check_in(key, self._seen_keys)
+            check_not_in(key, self._provisions)
+            self._provisions[key] = mv
+            return mv
+
+    @contextlib.contextmanager
+    def _current_request(self) -> ta.Generator[_Request, None, None]:
+        if (cr := self.__cur_req) is not None:
+            yield cr
+            return
+
+        cr = self._Request(self)
+        try:
+            self.__cur_req = cr
+            yield cr
+        finally:
+            self.__cur_req = None
+
     def try_provide(self, key: ta.Any) -> Maybe[ta.Any]:
         key = as_injector_key(key)

-
-
+        cr: _Injector._Request
+        with self._current_request() as cr:
+            if (rv := cr.handle_key(key)).present:
+                return rv.must()

-
-
-            return Maybe.just(fn(self))
+            if key == _INJECTOR_INJECTOR_KEY:
+                return cr.handle_provision(key, Maybe.just(self))

-
-
-
-        return Maybe.empty()
+            fn = self._pfm.get(key)
+            if fn is not None:
+                return cr.handle_provision(key, Maybe.just(fn(self)))

-
+            if self._p is not None:
+                pv = self._p.try_provide(key)
+                if pv is not None:
+                    return cr.handle_provision(key, Maybe.empty())
+
+            return cr.handle_provision(key, Maybe.empty())

     def provide(self, key: ta.Any) -> ta.Any:
         v = self.try_provide(key)
@@ -4222,6 +4232,23 @@ def unmarshal_obj(o: ta.Any, ty: ta.Union[ta.Type[T], ta.Any]) -> T:
     return get_obj_marshaler(ty).unmarshal(o)


+########################################
+# ../../../omlish/lite/runtime.py
+
+
+@cached_nullary
+def is_debugger_attached() -> bool:
+    return any(frame[1].endswith('pydevd.py') for frame in inspect.stack())
+
+
+REQUIRED_PYTHON_VERSION = (3, 8)
+
+
+def check_runtime_version() -> None:
+    if sys.version_info < REQUIRED_PYTHON_VERSION:
+        raise OSError(f'Requires python {REQUIRED_PYTHON_VERSION}, got {sys.version_info} from {sys.executable}')  # noqa
+
+
 ########################################
 # ../../configs.py

@@ -4288,19 +4315,19 @@ def build_config_named_children(

 @dc.dataclass(frozen=True)
 class ProcessPipes:
-    child_stdin: ta.Optional[
-    stdin: ta.Optional[
+    child_stdin: ta.Optional[Fd] = None
+    stdin: ta.Optional[Fd] = None

-    stdout: ta.Optional[
-    child_stdout: ta.Optional[
+    stdout: ta.Optional[Fd] = None
+    child_stdout: ta.Optional[Fd] = None

-    stderr: ta.Optional[
-    child_stderr: ta.Optional[
+    stderr: ta.Optional[Fd] = None
+    child_stderr: ta.Optional[Fd] = None

-    def child_fds(self) -> ta.List[
+    def child_fds(self) -> ta.List[Fd]:
         return [fd for fd in [self.child_stdin, self.child_stdout, self.child_stderr] if fd is not None]

-    def parent_fds(self) -> ta.List[
+    def parent_fds(self) -> ta.List[Fd]:
         return [fd for fd in [self.stdin, self.stdout, self.stderr] if fd is not None]


@@ -4310,7 +4337,7 @@ def make_process_pipes(stderr=True) -> ProcessPipes:
     read them in the mainloop without blocking. If stderr is False, don't create a pipe for stderr.
     """

-    pipes: ta.Dict[str, ta.Optional[
+    pipes: ta.Dict[str, ta.Optional[Fd]] = {
         'child_stdin': None,
         'stdin': None,

@@ -4322,11 +4349,11 @@ def make_process_pipes(stderr=True) -> ProcessPipes:
     }

     try:
-        pipes['child_stdin'], pipes['stdin'] =
-        pipes['stdout'], pipes['child_stdout'] =
+        pipes['child_stdin'], pipes['stdin'] = make_pipe()
+        pipes['stdout'], pipes['child_stdout'] = make_pipe()

         if stderr:
-            pipes['stderr'], pipes['child_stderr'] =
+            pipes['stderr'], pipes['child_stderr'] = make_pipe()

         for fd in (
             pipes['stdout'],
@@ -4362,6 +4389,218 @@ def close_child_pipes(pipes: ProcessPipes) -> None:
         close_fd(fd)


+########################################
+# ../setup.py
+
+
+##
+
+
+SupervisorUser = ta.NewType('SupervisorUser', User)
+
+
+##
+
+
+class DaemonizeListener(abc.ABC):  # noqa
+    def before_daemonize(self) -> None:  # noqa
+        pass
+
+    def after_daemonize(self) -> None:  # noqa
+        pass
+
+
+DaemonizeListeners = ta.NewType('DaemonizeListeners', ta.Sequence[DaemonizeListener])
+
+
+##
+
+
+class SupervisorSetup(abc.ABC):
+    @abc.abstractmethod
+    def setup(self) -> None:
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def cleanup(self) -> None:
+        raise NotImplementedError
+
+
+########################################
+# ../../../omlish/lite/http/handlers.py
+
+
+@dc.dataclass(frozen=True)
+class HttpHandlerRequest:
+    client_address: SocketAddress
+    method: str
+    path: str
+    headers: HttpHeaders
+    data: ta.Optional[bytes]
+
+
+@dc.dataclass(frozen=True)
+class HttpHandlerResponse:
+    status: ta.Union[http.HTTPStatus, int]
+
+    headers: ta.Optional[ta.Mapping[str, str]] = None
+    data: ta.Optional[bytes] = None
+    close_connection: ta.Optional[bool] = None
+
+
+class HttpHandlerError(Exception):
+    pass
+
+
+class UnsupportedMethodHttpHandlerError(Exception):
+    pass
+
+
+########################################
+# ../configs.py
+
+
+##
+
+
+class RestartWhenExitUnexpected:
+    pass
+
+
+class RestartUnconditionally:
+    pass
+
+
+##
+
+
+@dc.dataclass(frozen=True)
+class ProcessConfig:
+    name: str
+    command: str
+
+    uid: ta.Optional[int] = None
+    directory: ta.Optional[str] = None
+    umask: ta.Optional[int] = None
+    priority: int = 999
+
+    autostart: bool = True
+    autorestart: str = 'unexpected'
+
+    startsecs: int = 1
+    startretries: int = 3
+
+    numprocs: int = 1
+    numprocs_start: int = 0
+
+    @dc.dataclass(frozen=True)
+    class Log:
+        file: ta.Optional[str] = None
+        capture_maxbytes: ta.Optional[int] = None
+        events_enabled: bool = False
+        syslog: bool = False
+        backups: ta.Optional[int] = None
+        maxbytes: ta.Optional[int] = None
+
+    stdout: Log = Log()
+    stderr: Log = Log()
+
+    stopsignal: int = signal.SIGTERM
+    stopwaitsecs: int = 10
+    stopasgroup: bool = False
+
+    killasgroup: bool = False
+
+    exitcodes: ta.Sequence[int] = (0,)
+
+    redirect_stderr: bool = False
+
+    environment: ta.Optional[ta.Mapping[str, str]] = None
+
+
+@dc.dataclass(frozen=True)
+class ProcessGroupConfig:
+    name: str
+
+    priority: int = 999
+
+    processes: ta.Optional[ta.Sequence[ProcessConfig]] = None
+
+
+@dc.dataclass(frozen=True)
+class ServerConfig:
+    user: ta.Optional[str] = None
+    nodaemon: bool = False
+    umask: int = 0o22
+    directory: ta.Optional[str] = None
+    logfile: str = 'supervisord.log'
+    logfile_maxbytes: int = 50 * 1024 * 1024
+    logfile_backups: int = 10
+    loglevel: int = logging.INFO
+    pidfile: str = 'supervisord.pid'
+    identifier: str = 'supervisor'
+    child_logdir: str = '/dev/null'
+    minfds: int = 1024
+    minprocs: int = 200
+    nocleanup: bool = False
+    strip_ansi: bool = False
+    silent: bool = False
+
+    groups: ta.Optional[ta.Sequence[ProcessGroupConfig]] = None
+
+    @classmethod
+    def new(
+            cls,
+            umask: ta.Union[int, str] = 0o22,
+            directory: ta.Optional[str] = None,
+            logfile: str = 'supervisord.log',
+            logfile_maxbytes: ta.Union[int, str] = 50 * 1024 * 1024,
+            loglevel: ta.Union[int, str] = logging.INFO,
+            pidfile: str = 'supervisord.pid',
+            child_logdir: ta.Optional[str] = None,
+            **kwargs: ta.Any,
+    ) -> 'ServerConfig':
+        return cls(
+            umask=parse_octal(umask),
+            directory=check_existing_dir(directory) if directory is not None else None,
+            logfile=check_path_with_existing_dir(logfile),
+            logfile_maxbytes=parse_bytes_size(logfile_maxbytes),
+            loglevel=parse_logging_level(loglevel),
+            pidfile=check_path_with_existing_dir(pidfile),
+            child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
+            **kwargs,
+        )
+
+
+##
+
+
+def prepare_process_group_config(dct: ConfigMapping) -> ConfigMapping:
+    out = dict(dct)
+    out['processes'] = build_config_named_children(out.get('processes'))
+    return out
+
+
+def prepare_server_config(dct: ta.Mapping[str, ta.Any]) -> ta.Mapping[str, ta.Any]:
+    out = dict(dct)
+    group_dcts = build_config_named_children(out.get('groups'))
+    out['groups'] = [prepare_process_group_config(group_dct) for group_dct in group_dcts or []]
+    return out
+
+
+##
+
+
+def parse_logging_level(value: ta.Union[str, int]) -> int:
+    if isinstance(value, int):
+        return value
+    s = str(value).lower()
+    level = logging.getLevelNamesMapping().get(s.upper())
+    if level is None:
+        raise ValueError(f'bad logging level name {value!r}')
+    return level
+
+
 ########################################
 # ../poller.py

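The relocated ServerConfig.new() above now routes its coercions through the renamed helpers (parse_octal, parse_bytes_size, parse_logging_level, check_path_with_existing_dir). A hedged usage sketch, assuming the definitions above are in scope and that a /tmp directory exists on the host:

cfg = ServerConfig.new(
    umask='022',                     # parse_octal -> 0o22
    logfile='/tmp/supervisord.log',  # parent directory must already exist
    logfile_maxbytes='50MB',         # parse_bytes_size -> 52428800
    loglevel='info',                 # parse_logging_level -> logging.INFO
    pidfile='/tmp/supervisord.pid',
)
assert cfg.umask == 0o22
assert cfg.logfile_maxbytes == 50 * 1024 * 1024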
@@ -4371,23 +4610,23 @@ class Poller(DaemonizeListener, abc.ABC):
         super().__init__()

     @abc.abstractmethod
-    def register_readable(self, fd:
+    def register_readable(self, fd: Fd) -> None:
         raise NotImplementedError

     @abc.abstractmethod
-    def register_writable(self, fd:
+    def register_writable(self, fd: Fd) -> None:
         raise NotImplementedError

     @abc.abstractmethod
-    def unregister_readable(self, fd:
+    def unregister_readable(self, fd: Fd) -> None:
         raise NotImplementedError

     @abc.abstractmethod
-    def unregister_writable(self, fd:
+    def unregister_writable(self, fd: Fd) -> None:
         raise NotImplementedError

     @abc.abstractmethod
-    def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[
+    def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
         raise NotImplementedError

     def before_daemonize(self) -> None:  # noqa
@@ -4404,37 +4643,37 @@ class SelectPoller(Poller):
     def __init__(self) -> None:
         super().__init__()

-        self._readable: ta.Set[
-        self._writable: ta.Set[
+        self._readable: ta.Set[Fd] = set()
+        self._writable: ta.Set[Fd] = set()

-    def register_readable(self, fd:
+    def register_readable(self, fd: Fd) -> None:
         self._readable.add(fd)

-    def register_writable(self, fd:
+    def register_writable(self, fd: Fd) -> None:
         self._writable.add(fd)

-    def unregister_readable(self, fd:
+    def unregister_readable(self, fd: Fd) -> None:
         self._readable.discard(fd)

-    def unregister_writable(self, fd:
+    def unregister_writable(self, fd: Fd) -> None:
         self._writable.discard(fd)

     def unregister_all(self) -> None:
         self._readable.clear()
         self._writable.clear()

-    def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[
+    def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
         try:
             r, w, x = select.select(
                 self._readable,
                 self._writable,
                 [], timeout,
             )
-        except OSError as
-            if
+        except OSError as exc:
+            if exc.args[0] == errno.EINTR:
                 log.debug('EINTR encountered in poll')
                 return [], []
-            if
+            if exc.args[0] == errno.EBADF:
                 log.debug('EBADF encountered in poll')
                 self.unregister_all()
                 return [], []
@@ -4450,30 +4689,30 @@ class PollPoller(Poller):
         super().__init__()

         self._poller = select.poll()
-        self._readable: set[
-        self._writable: set[
+        self._readable: set[Fd] = set()
+        self._writable: set[Fd] = set()

-    def register_readable(self, fd:
+    def register_readable(self, fd: Fd) -> None:
         self._poller.register(fd, self._READ)
         self._readable.add(fd)

-    def register_writable(self, fd:
+    def register_writable(self, fd: Fd) -> None:
         self._poller.register(fd, self._WRITE)
         self._writable.add(fd)

-    def unregister_readable(self, fd:
+    def unregister_readable(self, fd: Fd) -> None:
         self._readable.discard(fd)
         self._poller.unregister(fd)
         if fd in self._writable:
             self._poller.register(fd, self._WRITE)

-    def unregister_writable(self, fd:
+    def unregister_writable(self, fd: Fd) -> None:
         self._writable.discard(fd)
         self._poller.unregister(fd)
         if fd in self._readable:
             self._poller.register(fd, self._READ)

-    def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[
+    def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
         fds = self._poll_fds(timeout)  # type: ignore
         readable, writable = [], []
         for fd, eventmask in fds:
@@ -4485,16 +4724,16 @@ class PollPoller(Poller):
                 writable.append(fd)
         return readable, writable

-    def _poll_fds(self, timeout: float) -> ta.List[ta.Tuple[
+    def _poll_fds(self, timeout: float) -> ta.List[ta.Tuple[Fd, Fd]]:
         try:
-            return self._poller.poll(timeout * 1000)
-        except OSError as
-            if
+            return self._poller.poll(timeout * 1000)  # type: ignore
+        except OSError as exc:
+            if exc.args[0] == errno.EINTR:
                 log.debug('EINTR encountered in poll')
                 return []
             raise

-    def _ignore_invalid(self, fd:
+    def _ignore_invalid(self, fd: Fd, eventmask: int) -> bool:
         if eventmask & select.POLLNVAL:
             # POLLNVAL means `fd` value is invalid, not open. When a process quits it's `fd`s are closed so there is no
             # more reason to keep this `fd` registered If the process restarts it's `fd`s are registered again.
@@ -4513,30 +4752,30 @@ if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
             super().__init__()

             self._kqueue: ta.Optional[ta.Any] = select.kqueue()
-            self._readable: set[
-            self._writable: set[
+            self._readable: set[Fd] = set()
+            self._writable: set[Fd] = set()

-        def register_readable(self, fd:
+        def register_readable(self, fd: Fd) -> None:
             self._readable.add(fd)
             kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD)
             self._kqueue_control(fd, kevent)

-        def register_writable(self, fd:
+        def register_writable(self, fd: Fd) -> None:
             self._writable.add(fd)
             kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD)
             self._kqueue_control(fd, kevent)

-        def unregister_readable(self, fd:
+        def unregister_readable(self, fd: Fd) -> None:
             kevent = select.kevent(fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_DELETE)
             self._readable.discard(fd)
             self._kqueue_control(fd, kevent)

-        def unregister_writable(self, fd:
+        def unregister_writable(self, fd: Fd) -> None:
             kevent = select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_DELETE)
             self._writable.discard(fd)
             self._kqueue_control(fd, kevent)

-        def _kqueue_control(self, fd:
+        def _kqueue_control(self, fd: Fd, kevent: 'select.kevent') -> None:
             try:
                 self._kqueue.control([kevent], 0)  # type: ignore
             except OSError as error:
@@ -4545,7 +4784,7 @@ if sys.platform == 'darwin' or sys.platform.startswith('freebsd'):
                 else:
                     raise

-        def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[
+        def poll(self, timeout: ta.Optional[float]) -> ta.Tuple[ta.List[Fd], ta.List[Fd]]:
             readable, writable = [], []  # type: ignore

             try:
@@ -4595,157 +4834,6 @@ def get_poller_impl() -> ta.Type[Poller]:
|
|
4595
4834
|
return SelectPoller
|
4596
4835
|
|
4597
4836
|
|
4598
|
-
########################################
|
4599
|
-
# ../../../omlish/lite/http/handlers.py
|
4600
|
-
|
4601
|
-
|
4602
|
-
@dc.dataclass(frozen=True)
|
4603
|
-
class HttpHandlerRequest:
|
4604
|
-
client_address: SocketAddress
|
4605
|
-
method: str
|
4606
|
-
path: str
|
4607
|
-
headers: HttpHeaders
|
4608
|
-
data: ta.Optional[bytes]
|
4609
|
-
|
4610
|
-
|
4611
|
-
@dc.dataclass(frozen=True)
|
4612
|
-
class HttpHandlerResponse:
|
4613
|
-
status: ta.Union[http.HTTPStatus, int]
|
4614
|
-
|
4615
|
-
headers: ta.Optional[ta.Mapping[str, str]] = None
|
4616
|
-
data: ta.Optional[bytes] = None
|
4617
|
-
close_connection: ta.Optional[bool] = None
|
4618
|
-
|
4619
|
-
|
4620
|
-
class HttpHandlerError(Exception):
|
4621
|
-
pass
|
4622
|
-
|
4623
|
-
|
4624
|
-
class UnsupportedMethodHttpHandlerError(Exception):
|
4625
|
-
pass
|
4626
|
-
|
4627
|
-
|
4628
|
-
########################################
|
4629
|
-
# ../configs.py
|
4630
|
-
|
4631
|
-
|
4632
|
-
##
|
4633
|
-
|
4634
|
-
|
4635
|
-
@dc.dataclass(frozen=True)
|
4636
|
-
class ProcessConfig:
|
4637
|
-
name: str
|
4638
|
-
command: str
|
4639
|
-
|
4640
|
-
uid: ta.Optional[int] = None
|
4641
|
-
-    directory: ta.Optional[str] = None
-    umask: ta.Optional[int] = None
-    priority: int = 999
-
-    autostart: bool = True
-    autorestart: str = 'unexpected'
-
-    startsecs: int = 1
-    startretries: int = 3
-
-    numprocs: int = 1
-    numprocs_start: int = 0
-
-    @dc.dataclass(frozen=True)
-    class Log:
-        file: ta.Optional[str] = None
-        capture_maxbytes: ta.Optional[int] = None
-        events_enabled: bool = False
-        syslog: bool = False
-        backups: ta.Optional[int] = None
-        maxbytes: ta.Optional[int] = None
-
-    stdout: Log = Log()
-    stderr: Log = Log()
-
-    stopsignal: int = signal.SIGTERM
-    stopwaitsecs: int = 10
-    stopasgroup: bool = False
-
-    killasgroup: bool = False
-
-    exitcodes: ta.Sequence[int] = (0,)
-
-    redirect_stderr: bool = False
-
-    environment: ta.Optional[ta.Mapping[str, str]] = None
-
-
-@dc.dataclass(frozen=True)
-class ProcessGroupConfig:
-    name: str
-
-    priority: int = 999
-
-    processes: ta.Optional[ta.Sequence[ProcessConfig]] = None
-
-
-@dc.dataclass(frozen=True)
-class ServerConfig:
-    user: ta.Optional[str] = None
-    nodaemon: bool = False
-    umask: int = 0o22
-    directory: ta.Optional[str] = None
-    logfile: str = 'supervisord.log'
-    logfile_maxbytes: int = 50 * 1024 * 1024
-    logfile_backups: int = 10
-    loglevel: int = logging.INFO
-    pidfile: str = 'supervisord.pid'
-    identifier: str = 'supervisor'
-    child_logdir: str = '/dev/null'
-    minfds: int = 1024
-    minprocs: int = 200
-    nocleanup: bool = False
-    strip_ansi: bool = False
-    silent: bool = False
-
-    groups: ta.Optional[ta.Sequence[ProcessGroupConfig]] = None
-
-    @classmethod
-    def new(
-            cls,
-            umask: ta.Union[int, str] = 0o22,
-            directory: ta.Optional[str] = None,
-            logfile: str = 'supervisord.log',
-            logfile_maxbytes: ta.Union[int, str] = 50 * 1024 * 1024,
-            loglevel: ta.Union[int, str] = logging.INFO,
-            pidfile: str = 'supervisord.pid',
-            child_logdir: ta.Optional[str] = None,
-            **kwargs: ta.Any,
-    ) -> 'ServerConfig':
-        return cls(
-            umask=octal_type(umask),
-            directory=existing_directory(directory) if directory is not None else None,
-            logfile=existing_dirpath(logfile),
-            logfile_maxbytes=byte_size(logfile_maxbytes),
-            loglevel=logging_level(loglevel),
-            pidfile=existing_dirpath(pidfile),
-            child_logdir=child_logdir if child_logdir else tempfile.gettempdir(),
-            **kwargs,
-        )
-
-
-##
-
-
-def prepare_process_group_config(dct: ConfigMapping) -> ConfigMapping:
-    out = dict(dct)
-    out['processes'] = build_config_named_children(out.get('processes'))
-    return out
-
-
-def prepare_server_config(dct: ta.Mapping[str, ta.Any]) -> ta.Mapping[str, ta.Any]:
-    out = dict(dct)
-    group_dcts = build_config_named_children(out.get('groups'))
-    out['groups'] = [prepare_process_group_config(group_dct) for group_dct in group_dcts or []]
-    return out
-
-
 
 ########################################
 # ../../../omlish/lite/http/coroserver.py
 # PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
@@ -5313,6 +5401,10 @@ class CoroHttpServerSocketHandler(SocketHandler):
 ##
 
 
+class ExitNow(Exception):  # noqa
+    pass
+
+
 ServerEpoch = ta.NewType('ServerEpoch', int)
 
 
@@ -5336,12 +5428,7 @@ class ConfigPriorityOrdered(abc.ABC):
 ##
 
 
-class
-    @property
-    @abc.abstractmethod
-    def config(self) -> ServerConfig:
-        raise NotImplementedError
-
+class SupervisorStateManager(abc.ABC):
     @property
     @abc.abstractmethod
     def state(self) -> SupervisorState:
@@ -5351,21 +5438,11 @@ class ServerContext(abc.ABC):
     def set_state(self, state: SupervisorState) -> None:
         raise NotImplementedError
 
-    @property
-    @abc.abstractmethod
-    def pid_history(self) -> ta.Dict[int, 'Process']:
-        raise NotImplementedError
-
 
 ##
 
 
 class Dispatcher(abc.ABC):
-    @property
-    @abc.abstractmethod
-    def process(self) -> 'Process':
-        raise NotImplementedError
-
     @property
     @abc.abstractmethod
     def channel(self) -> str:
@@ -5373,7 +5450,7 @@ class Dispatcher(abc.ABC):
 
     @property
     @abc.abstractmethod
-    def fd(self) ->
+    def fd(self) -> Fd:
         raise NotImplementedError
 
     @property
@@ -5409,8 +5486,32 @@ class Dispatcher(abc.ABC):
     def handle_write_event(self) -> None:
         raise TypeError
 
+    #
+
+    def handle_connect(self) -> None:
+        raise TypeError
+
+    def handle_close(self) -> None:
+        raise TypeError
+
+    def handle_accepted(self, sock, addr) -> None:
+        raise TypeError
+
+
+class HasDispatchers(abc.ABC):
+    @abc.abstractmethod
+    def get_dispatchers(self) -> 'Dispatchers':
+        raise NotImplementedError
+
+
+class ProcessDispatcher(Dispatcher, abc.ABC):
+    @property
+    @abc.abstractmethod
+    def process(self) -> 'Process':
+        raise NotImplementedError
 
-
+
+class ProcessOutputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
     def remove_logs(self) -> None:
         raise NotImplementedError
@@ -5420,7 +5521,7 @@ class OutputDispatcher(Dispatcher, abc.ABC):
         raise NotImplementedError
 
 
-class
+class ProcessInputDispatcher(ProcessDispatcher, abc.ABC):
     @abc.abstractmethod
     def write(self, chars: ta.Union[bytes, str]) -> None:
         raise NotImplementedError
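Throughout the hunks above and below, bare ints for file descriptors, pids, and return codes are replaced with the Fd/Pid/Rc aliases from the new ominfra/supervisor/utils/ostypes.py module. A minimal sketch of that pattern, assuming the aliases are plain typing.NewType wrappers (consistent with the InheritedFds declaration later in this diff):

    import os
    import typing as ta

    Fd = ta.NewType('Fd', int)    # file descriptor
    Pid = ta.NewType('Pid', int)  # process id
    Rc = ta.NewType('Rc', int)    # exit / return code

    def close_quietly(fd: Fd) -> None:
        try:
            os.close(fd)  # NewType erases to int at runtime, so os.close accepts it
        except OSError:
            pass

    close_quietly(Fd(2))  # type checkers reject close_quietly(2) without the explicit wrap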
@@ -5433,7 +5534,11 @@ class InputDispatcher(Dispatcher, abc.ABC):
 ##
 
 
-class Process(
+class Process(
+        ConfigPriorityOrdered,
+        HasDispatchers,
+        abc.ABC,
+):
     @property
     @abc.abstractmethod
     def name(self) -> str:
@@ -5451,18 +5556,13 @@ class Process(ConfigPriorityOrdered, abc.ABC):
 
     @property
     @abc.abstractmethod
-    def pid(self) ->
+    def pid(self) -> Pid:
         raise NotImplementedError
 
     #
 
-    @property
-    @abc.abstractmethod
-    def context(self) -> ServerContext:
-        raise NotImplementedError
-
     @abc.abstractmethod
-    def finish(self, sts:
+    def finish(self, sts: Rc) -> None:
         raise NotImplementedError
 
     @abc.abstractmethod
@@ -5477,18 +5577,15 @@ class Process(ConfigPriorityOrdered, abc.ABC):
     def transition(self) -> None:
         raise NotImplementedError
 
+    @property
     @abc.abstractmethod
-    def
+    def state(self) -> ProcessState:
         raise NotImplementedError
 
     @abc.abstractmethod
     def after_setuid(self) -> None:
         raise NotImplementedError
 
-    @abc.abstractmethod
-    def get_dispatchers(self) -> 'Dispatchers':
-        raise NotImplementedError
-
 
 ##
 
@@ -5509,105 +5606,31 @@ class ProcessGroup(
         raise NotImplementedError
 
     @property
-    @abc.abstractmethod
-    def by_name(self) -> ta.Mapping[str, Process]:
-        raise NotImplementedError
-
-    #
-
-    @abc.abstractmethod
-    def stop_all(self) -> None:
-        raise NotImplementedError
-
-    @abc.abstractmethod
-    def get_unstopped_processes(self) -> ta.List[Process]:
-        raise NotImplementedError
-
-    @abc.abstractmethod
-    def before_remove(self) -> None:
-        raise NotImplementedError
-
-
-########################################
-# ../context.py
-
-
-class ServerContextImpl(ServerContext):
-    def __init__(
-            self,
-            config: ServerConfig,
-            poller: Poller,
-            *,
-            epoch: ServerEpoch = ServerEpoch(0),
-    ) -> None:
-        super().__init__()
-
-        self._config = config
-        self._poller = poller
-        self._epoch = epoch
-
-        self._pid_history: ta.Dict[int, Process] = {}
-        self._state: SupervisorState = SupervisorState.RUNNING
-
-    @property
-    def config(self) -> ServerConfig:
-        return self._config
-
-    @property
-    def epoch(self) -> ServerEpoch:
-        return self._epoch
-
-    @property
-    def first(self) -> bool:
-        return not self._epoch
-
-    @property
-    def state(self) -> SupervisorState:
-        return self._state
-
-    def set_state(self, state: SupervisorState) -> None:
-        self._state = state
-
-    @property
-    def pid_history(self) -> ta.Dict[int, Process]:
-        return self._pid_history
+    @abc.abstractmethod
+    def by_name(self) -> ta.Mapping[str, Process]:
+        raise NotImplementedError
 
     #
 
-
-
-
-
-
-
-
-
-
-
-        if code not in (errno.ECHILD, errno.EINTR):
-            log.critical('waitpid error %r; a process may not be cleaned up properly', code)
-        if code == errno.EINTR:
-            log.debug('EINTR during reap')
-        pid, sts = None, None
-        return pid, sts
-
-    def get_auto_child_log_name(self, name: str, identifier: str, channel: str) -> str:
-        prefix = f'{name}-{channel}---{identifier}-'
-        logfile = mktempfile(
-            suffix='.log',
-            prefix=prefix,
-            dir=self.config.child_logdir,
-        )
-        return logfile
+    @abc.abstractmethod
+    def stop_all(self) -> None:
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def get_unstopped_processes(self) -> ta.List[Process]:
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def before_remove(self) -> None:
+        raise NotImplementedError
 
 
 ########################################
 # ../dispatchers.py
 
 
-class Dispatchers(KeyedCollection[
-    def _key(self, v: Dispatcher) ->
+class Dispatchers(KeyedCollection[Fd, Dispatcher]):
+    def _key(self, v: Dispatcher) -> Fd:
         return v.fd
 
     #
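Dispatchers is now keyed by Fd, so a ready descriptor maps straight to its dispatcher. A rough sketch of what that dispatch step looks like using the stdlib select module (the real code goes through the Poller and the IoManager added later in this diff):

    import select

    def dispatch_once(dispatchers, timeout=1.0):
        # collect fds that currently want to read or write
        readable = [d.fd for d in dispatchers if d.readable()]
        writable = [d.fd for d in dispatchers if d.writable()]
        r, w, _ = select.select(readable, writable, [], timeout)
        for fd in r:
            dispatchers[fd].handle_read_event()   # fd-keyed lookup
        for fd in w:
            dispatchers[fd].handle_write_event()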
@@ -5625,12 +5648,12 @@ class Dispatchers(KeyedCollection[int, Dispatcher]):
 
     def remove_logs(self) -> None:
         for d in self:
-            if isinstance(d,
+            if isinstance(d, ProcessOutputDispatcher):
                 d.remove_logs()
 
     def reopen_logs(self) -> None:
         for d in self:
-            if isinstance(d,
+            if isinstance(d, ProcessOutputDispatcher):
                 d.reopen_logs()
 
 
@@ -5638,14 +5661,15 @@ class Dispatchers(KeyedCollection[int, Dispatcher]):
 # ../dispatchersimpl.py
 
 
-class
+class BaseProcessDispatcherImpl(ProcessDispatcher, abc.ABC):
     def __init__(
             self,
             process: Process,
             channel: str,
-            fd:
+            fd: Fd,
             *,
             event_callbacks: EventCallbacks,
+            server_config: ServerConfig,
     ) -> None:
         super().__init__()
 
@@ -5653,6 +5677,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
         self._channel = channel  # 'stderr' or 'stdout'
         self._fd = fd
         self._event_callbacks = event_callbacks
+        self._server_config = server_config
 
         self._closed = False  # True if close() has been called
 
@@ -5672,7 +5697,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
         return self._channel
 
     @property
-    def fd(self) ->
+    def fd(self) -> Fd:
         return self._fd
 
     @property
@@ -5693,7 +5718,7 @@ class BaseDispatcherImpl(Dispatcher, abc.ABC):
         self.close()
 
 
-class
+class ProcessOutputDispatcherImpl(BaseProcessDispatcherImpl, ProcessOutputDispatcher):
     """
     Dispatcher for one channel (stdout or stderr) of one process. Serves several purposes:
 
@@ -5706,15 +5731,17 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
             self,
             process: Process,
             event_type: ta.Type[ProcessCommunicationEvent],
-            fd:
+            fd: Fd,
             *,
             event_callbacks: EventCallbacks,
+            server_config: ServerConfig,
     ) -> None:
         super().__init__(
             process,
             event_type.channel,
             fd,
             event_callbacks=event_callbacks,
+            server_config=server_config,
         )
 
         self._event_type = event_type
@@ -5738,11 +5765,10 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
 
         self._main_log_level = logging.DEBUG
 
-        self._log_to_main_log =
+        self._log_to_main_log = self._server_config.loglevel <= self._main_log_level
 
-
-        self.
-        self._stderr_events_enabled = config.stderr.events_enabled
+        self._stdout_events_enabled = self._process.config.stdout.events_enabled
+        self._stderr_events_enabled = self._process.config.stderr.events_enabled
 
     _child_log: ta.Optional[logging.Logger] = None  # the current logger (normal_log or capture_log)
     _normal_log: ta.Optional[logging.Logger] = None  # the "normal" (non-capture) logger
@@ -5813,7 +5839,7 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
         if not data:
             return
 
-        if self.
+        if self._server_config.strip_ansi:
             data = strip_escapes(as_bytes(data))
 
         if self._child_log:
@@ -5911,20 +5937,22 @@ class OutputDispatcherImpl(BaseDispatcherImpl, OutputDispatcher):
             self.close()
 
 
-class
+class ProcessInputDispatcherImpl(BaseProcessDispatcherImpl, ProcessInputDispatcher):
     def __init__(
             self,
             process: Process,
             channel: str,
-            fd:
+            fd: Fd,
             *,
             event_callbacks: EventCallbacks,
+            server_config: ServerConfig,
     ) -> None:
         super().__init__(
             process,
             channel,
             fd,
             event_callbacks=event_callbacks,
+            server_config=server_config,
         )
 
         self._input_buffer = b''
@@ -6084,7 +6112,7 @@ class ProcessGroupImpl(ProcessGroup):
     #
 
     def get_unstopped_processes(self) -> ta.List[Process]:
-        return [x for x in self if not x.
+        return [x for x in self if not x.state.stopped]
 
     def stop_all(self) -> None:
         processes = list(self._by_name.values())
@@ -6092,7 +6120,7 @@ class ProcessGroupImpl(ProcessGroup):
         processes.reverse()  # stop in desc priority order
 
         for proc in processes:
-            state = proc.
+            state = proc.state
             if state == ProcessState.RUNNING:
                 # RUNNING -> STOPPING
                 proc.stop()
@@ -6110,7 +6138,7 @@ class ProcessGroupImpl(ProcessGroup):
 
 
 ########################################
-# ../
+# ../process.py
 
 
 ##
@@ -6123,7 +6151,7 @@ class ProcessStateError(RuntimeError):
 ##
 
 
-class PidHistory(ta.Dict[
+class PidHistory(ta.Dict[Pid, Process]):
     pass
 
 
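PidHistory maps reaped child pids back to their owning Process. A minimal sketch of the non-blocking reap loop such a mapping supports (hypothetical standalone code; the actual Supervisor._reap/waitpid pair appears near the end of this diff):

    import os

    def reap_children(pid_history):
        # drain all exited children without blocking
        while True:
            try:
                pid, sts = os.waitpid(-1, os.WNOHANG)
            except ChildProcessError:  # no children at all
                return
            if pid == 0:  # children exist but none have exited yet
                return
            proc = pid_history.pop(pid, None)
            if proc is not None:
                proc.finish(sts)  # hand the wait status back to the owning Process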
@@ -6272,348 +6300,126 @@ class SupervisorSetupImpl(SupervisorSetup):
         except (resource.error, ValueError):
             raise RuntimeError(msg % dict(  # type: ignore  # noqa
                 min_limit=min_limit,
-                res=res,
-                name=name,
-                soft=soft,
-                hard=hard,
-            ))
-
-    #
-
-    _unlink_pidfile = False
-
-    def _write_pidfile(self) -> None:
-        pid = os.getpid()
-        try:
-            with open(self._config.pidfile, 'w') as f:
-                f.write(f'{pid}\n')
-        except OSError:
-            log.critical('could not write pidfile %s', self._config.pidfile)
-        else:
-            self._unlink_pidfile = True
-            log.info('supervisord started with pid %s', pid)
-
-    def _cleanup_pidfile(self) -> None:
-        if self._unlink_pidfile:
-            try_unlink(self._config.pidfile)
-
-    #
-
-    def _clear_auto_child_logdir(self) -> None:
-        # must be called after realize()
-        child_logdir = self._config.child_logdir
-        if child_logdir == '/dev/null':
-            return
-
-        fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
-        try:
-            filenames = os.listdir(child_logdir)
-        except OSError:
-            log.warning('Could not clear child_log dir')
-            return
-
-        for filename in filenames:
-            if fnre.match(filename):
-                pathname = os.path.join(child_logdir, filename)
-                try:
-                    os.remove(pathname)
-                except OSError:
-                    log.warning('Failed to clean up %r', pathname)
-
-    #
-
-    def _daemonize(self) -> None:
-        for dl in self._daemonize_listeners:
-            dl.before_daemonize()
-
-        self._do_daemonize()
-
-        for dl in self._daemonize_listeners:
-            dl.after_daemonize()
-
-    def _do_daemonize(self) -> None:
-        # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
-        # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
-        # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
-        # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
-        # use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
-        # session and process group and setting itself up as a new session leader.
-        #
-        # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
-        # of ourselves that is guaranteed to not be a session group leader.
-        #
-        # We also change directories, set stderr and stdout to null, and change our umask.
-        #
-        # This explanation was (gratefully) garnered from
-        # http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
-
-        pid = os.fork()
-        if pid != 0:
-            # Parent
-            log.debug('supervisord forked; parent exiting')
-            real_exit(0)
-
-        # Child
-        log.info('daemonizing the supervisord process')
-        if self._config.directory:
-            try:
-                os.chdir(self._config.directory)
-            except OSError as err:
-                log.critical("can't chdir into %r: %s", self._config.directory, err)
-            else:
-                log.info('set current directory: %r', self._config.directory)
-
-        os.dup2(0, os.open('/dev/null', os.O_RDONLY))
-        os.dup2(1, os.open('/dev/null', os.O_WRONLY))
-        os.dup2(2, os.open('/dev/null', os.O_WRONLY))
-
-        # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
-        # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
-        # again after the setsid() call, for obscure SVR4 reasons.
-        os.setsid()
-        os.umask(self._config.umask)
-
-
-########################################
-# ../spawning.py
-
-
-@dc.dataclass(frozen=True)
-class SpawnedProcess:
-    pid: int
-    pipes: ProcessPipes
-    dispatchers: Dispatchers
-
-
-class ProcessSpawnError(RuntimeError):
-    pass
-
-
-class ProcessSpawning:
-    @property
-    @abc.abstractmethod
-    def process(self) -> Process:
-        raise NotImplementedError
-
-    #
-
-    @abc.abstractmethod
-    def spawn(self) -> SpawnedProcess:  # Raises[ProcessSpawnError]
-        raise NotImplementedError
-
-
-########################################
-# ../supervisor.py
-
-
-##
-
-
-class SignalHandler:
-    def __init__(
-            self,
-            *,
-            context: ServerContextImpl,
-            signal_receiver: SignalReceiver,
-            process_groups: ProcessGroupManager,
-    ) -> None:
-        super().__init__()
-
-        self._context = context
-        self._signal_receiver = signal_receiver
-        self._process_groups = process_groups
-
-    def set_signals(self) -> None:
-        self._signal_receiver.install(
-            signal.SIGTERM,
-            signal.SIGINT,
-            signal.SIGQUIT,
-            signal.SIGHUP,
-            signal.SIGCHLD,
-            signal.SIGUSR2,
-        )
-
-    def handle_signals(self) -> None:
-        sig = self._signal_receiver.get_signal()
-        if not sig:
-            return
-
-        if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
-            log.warning('received %s indicating exit request', sig_name(sig))
-            self._context.set_state(SupervisorState.SHUTDOWN)
-
-        elif sig == signal.SIGHUP:
-            if self._context.state == SupervisorState.SHUTDOWN:
-                log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig))  # noqa
-            else:
-                log.warning('received %s indicating restart request', sig_name(sig))  # noqa
-                self._context.set_state(SupervisorState.RESTARTING)
-
-        elif sig == signal.SIGCHLD:
-            log.debug('received %s indicating a child quit', sig_name(sig))
-
-        elif sig == signal.SIGUSR2:
-            log.info('received %s indicating log reopen request', sig_name(sig))
-
-            for p in self._process_groups.all_processes():
-                for d in p.get_dispatchers():
-                    if isinstance(d, OutputDispatcher):
-                        d.reopen_logs()
-
-        else:
-            log.debug('received %s indicating nothing', sig_name(sig))
-
-
-##
-
-
-class ProcessGroupFactory(Func1[ProcessGroupConfig, ProcessGroup]):
-    pass
-
-
-class Supervisor:
-    def __init__(
-            self,
-            *,
-            context: ServerContextImpl,
-            poller: Poller,
-            process_groups: ProcessGroupManager,
-            signal_handler: SignalHandler,
-            event_callbacks: EventCallbacks,
-            process_group_factory: ProcessGroupFactory,
-            pid_history: PidHistory,
-            setup: SupervisorSetup,
-    ) -> None:
-        super().__init__()
-
-        self._context = context
-        self._poller = poller
-        self._process_groups = process_groups
-        self._signal_handler = signal_handler
-        self._event_callbacks = event_callbacks
-        self._process_group_factory = process_group_factory
-        self._pid_history = pid_history
-        self._setup = setup
-
-        self._ticks: ta.Dict[int, float] = {}
-        self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
-        self._stopping = False  # set after we detect that we are handling a stop request
-        self._last_shutdown_report = 0.  # throttle for delayed process error reports at stop
-
-    #
-
-    @property
-    def context(self) -> ServerContextImpl:
-        return self._context
-
-    def get_state(self) -> SupervisorState:
-        return self._context.state
-
-    #
-
-    def add_process_group(self, config: ProcessGroupConfig) -> bool:
-        if self._process_groups.get(config.name) is not None:
-            return False
-
-        group = check_isinstance(self._process_group_factory(config), ProcessGroup)
-        for process in group:
-            process.after_setuid()
-
-        self._process_groups.add(group)
+                res=res,
+                name=name,
+                soft=soft,
+                hard=hard,
+            ))
 
-
+    #
 
-
-        if self._process_groups[name].get_unstopped_processes():
-            return False
+    _unlink_pidfile = False
 
-
+    def _write_pidfile(self) -> None:
+        pid = os.getpid()
+        try:
+            with open(self._config.pidfile, 'w') as f:
+                f.write(f'{pid}\n')
+        except OSError:
+            log.critical('could not write pidfile %s', self._config.pidfile)
+        else:
+            self._unlink_pidfile = True
+            log.info('supervisord started with pid %s', pid)
 
-
+    def _cleanup_pidfile(self) -> None:
+        if self._unlink_pidfile:
+            try_unlink(self._config.pidfile)
 
     #
 
-    def
-
-
+    def _clear_auto_child_logdir(self) -> None:
+        # must be called after realize()
+        child_logdir = self._config.child_logdir
+        if child_logdir == '/dev/null':
+            return
 
-
-
-            log.info('waiting for %s to die', namestr)
-            self._last_shutdown_report = now
-            for proc in unstopped:
-                log.debug('%s state: %s', proc.config.name, proc.get_state().name)
+        fnre = re.compile(rf'.+?---{self._config.identifier}-\S+\.log\.?\d{{0,4}}')
+        try:
+            filenames = os.listdir(child_logdir)
+        except OSError:
+            log.warning('Could not clear child_log dir')
+            return
 
-
+        for filename in filenames:
+            if fnre.match(filename):
+                pathname = os.path.join(child_logdir, filename)
+                try:
+                    os.remove(pathname)
+                except OSError:
+                    log.warning('Failed to clean up %r', pathname)
 
     #
 
-    def
-        self.
-
-        self.run(**kwargs)
-        finally:
-            self._setup.cleanup()
+    def _daemonize(self) -> None:
+        for dl in self._daemonize_listeners:
+            dl.before_daemonize()
 
-
-        self,
-        *,
-        callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
-    ) -> None:
-        self._process_groups.clear()
-        self._stop_groups = None  # clear
+        self._do_daemonize()
 
-        self.
+        for dl in self._daemonize_listeners:
+            dl.after_daemonize()
 
-
-
+    def _do_daemonize(self) -> None:
+        # To daemonize, we need to become the leader of our own session (process) group. If we do not, signals sent to
+        # our parent process will also be sent to us. This might be bad because signals such as SIGINT can be sent to
+        # our parent process during normal (uninteresting) operations such as when we press Ctrl-C in the parent
+        # terminal window to escape from a logtail command. To disassociate ourselves from our parent's session group we
+        # use os.setsid. It means "set session id", which has the effect of disassociating a process from is current
+        # session and process group and setting itself up as a new session leader.
+        #
+        # Unfortunately we cannot call setsid if we're already a session group leader, so we use "fork" to make a copy
+        # of ourselves that is guaranteed to not be a session group leader.
+        #
+        # We also change directories, set stderr and stdout to null, and change our umask.
+        #
+        # This explanation was (gratefully) garnered from
+        # http://www.cems.uwe.ac.uk/~irjohnso/coursenotes/lrc/system/daemons/d3.htm
 
-
+        pid = os.fork()
+        if pid != 0:
+            # Parent
+            log.debug('supervisord forked; parent exiting')
+            real_exit(Rc(0))
 
-
+        # Child
+        log.info('daemonizing the supervisord process')
+        if self._config.directory:
+            try:
+                os.chdir(self._config.directory)
+            except OSError as err:
+                log.critical("can't chdir into %r: %s", self._config.directory, err)
+            else:
+                log.info('set current directory: %r', self._config.directory)
 
-
-
+        os.dup2(0, os.open('/dev/null', os.O_RDONLY))
+        os.dup2(1, os.open('/dev/null', os.O_WRONLY))
+        os.dup2(2, os.open('/dev/null', os.O_WRONLY))
 
-
+        # XXX Stevens, in his Advanced Unix book, section 13.3 (page 417) recommends calling umask(0) and closing unused
+        # file descriptors. In his Network Programming book, he additionally recommends ignoring SIGHUP and forking
+        # again after the setsid() call, for obscure SVR4 reasons.
+        os.setsid()
+        os.umask(self._config.umask)
 
-        finally:
-            self._poller.close()
 
-
+########################################
+# ../io.py
 
-    def _run_once(self) -> None:
-        self._poll()
-        self._reap()
-        self._signal_handler.handle_signals()
-        self._tick()
 
-
-        self._ordered_stop_groups_phase_2()
+##
 
-    def _ordered_stop_groups_phase_1(self) -> None:
-        if self._stop_groups:
-            # stop the last group (the one with the "highest" priority)
-            self._stop_groups[-1].stop_all()
 
-
-
-
+class IoManager:
+    def __init__(
+            self,
+            *,
+            poller: Poller,
+            process_groups: ProcessGroupManager,
+    ) -> None:
+        super().__init__()
+
+        self._poller = poller
+        self._process_groups = process_groups
 
     def get_dispatchers(self) -> Dispatchers:
         return Dispatchers(
@@ -6622,25 +6428,9 @@ class Supervisor:
             for d in p.get_dispatchers()
         )
 
-    def
+    def poll(self) -> None:
         dispatchers = self.get_dispatchers()
 
-        sorted_groups = list(self._process_groups)
-        sorted_groups.sort()
-
-        if self._context.state < SupervisorState.RUNNING:
-            if not self._stopping:
-                # first time, set the stopping flag, do a notification and set stop_groups
-                self._stopping = True
-                self._stop_groups = sorted_groups[:]
-                self._event_callbacks.notify(SupervisorStoppingEvent())
-
-            self._ordered_stop_groups_phase_1()
-
-            if not self.shutdown_report():
-                # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
-                raise ExitNow
-
         for fd, dispatcher in dispatchers.items():
             if dispatcher.readable():
                 self._poller.register_readable(fd)
@@ -6690,53 +6480,96 @@ class Supervisor:
             except Exception:  # noqa
                 pass
 
-        for group in sorted_groups:
-            for process in group:
-                process.transition()
 
-
-
+########################################
+# ../signals.py
+
+
+class SignalHandler:
+    def __init__(
+            self,
+            *,
+            states: SupervisorStateManager,
+            signal_receiver: SignalReceiver,
+            process_groups: ProcessGroupManager,
+    ) -> None:
+        super().__init__()
+
+        self._states = states
+        self._signal_receiver = signal_receiver
+        self._process_groups = process_groups
+
+    def set_signals(self) -> None:
+        self._signal_receiver.install(
+            signal.SIGTERM,
+            signal.SIGINT,
+            signal.SIGQUIT,
+            signal.SIGHUP,
+            signal.SIGCHLD,
+            signal.SIGUSR2,
+        )
 
-
-
+    def handle_signals(self) -> None:
+        sig = self._signal_receiver.get_signal()
+        if not sig:
             return
 
-
-
-
-
+        if sig in (signal.SIGTERM, signal.SIGINT, signal.SIGQUIT):
+            log.warning('received %s indicating exit request', sig_name(sig))
+            self._states.set_state(SupervisorState.SHUTDOWN)
+
+        elif sig == signal.SIGHUP:
+            if self._states.state == SupervisorState.SHUTDOWN:
+                log.warning('ignored %s indicating restart request (shutdown in progress)', sig_name(sig))  # noqa
+            else:
+                log.warning('received %s indicating restart request', sig_name(sig))  # noqa
+                self._states.set_state(SupervisorState.RESTARTING)
+
+        elif sig == signal.SIGCHLD:
+            log.debug('received %s indicating a child quit', sig_name(sig))
+
+        elif sig == signal.SIGUSR2:
+            log.info('received %s indicating log reopen request', sig_name(sig))
+
+            for p in self._process_groups.all_processes():
+                for d in p.get_dispatchers():
+                    if isinstance(d, ProcessOutputDispatcher):
+                        d.reopen_logs()
+
         else:
-
-            del self._pid_history[pid]
+            log.debug('received %s indicating nothing', sig_name(sig))
 
-        if not once:
-            # keep reaping until no more kids to reap, but don't recurse infinitely
-            self._reap(once=False, depth=depth + 1)
 
-
-
+########################################
+# ../spawning.py
 
-        if now is None:
-            # now won't be None in unit tests
-            now = time.time()
 
-
-
+@dc.dataclass(frozen=True)
+class SpawnedProcess:
+    pid: Pid
+    pipes: ProcessPipes
+    dispatchers: Dispatchers
 
-        last_tick = self._ticks.get(period)
-        if last_tick is None:
-            # we just started up
-            last_tick = self._ticks[period] = timeslice(period, now)
 
-
-
-
-
+class ProcessSpawnError(RuntimeError):
+    pass
+
+
+class ProcessSpawning:
+    @property
+    @abc.abstractmethod
+    def process(self) -> Process:
+        raise NotImplementedError
+
+    #
+
+    @abc.abstractmethod
+    def spawn(self) -> SpawnedProcess:  # Raises[ProcessSpawnError]
+        raise NotImplementedError
 
 
 ########################################
-# ../
+# ../processimpl.py
 
 
 class ProcessSpawningFactory(Func1[Process, ProcessSpawning]):
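The relocated SignalHandler above only consumes signals that a SignalReceiver has already recorded; the installed handlers defer all real work to the main loop. A minimal sketch of that deferral pattern (hypothetical standalone code; the real SignalReceiver is defined elsewhere in this file):

    import signal
    import typing as ta

    class TinySignalReceiver:
        def __init__(self) -> None:
            self._pending: ta.List[int] = []

        def install(self, *sigs: int) -> None:
            for s in sigs:
                signal.signal(s, self._record)  # handler only records, never does real work

        def _record(self, sig, frame) -> None:
            self._pending.append(sig)

        def get_signal(self) -> ta.Optional[int]:
            return self._pending.pop(0) if self._pending else None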
@@ -6754,7 +6587,7 @@ class ProcessImpl(Process):
             config: ProcessConfig,
             group: ProcessGroup,
             *,
-
+            supervisor_states: SupervisorStateManager,
             event_callbacks: EventCallbacks,
             process_spawning_factory: ProcessSpawningFactory,
     ) -> None:
@@ -6763,7 +6596,7 @@ class ProcessImpl(Process):
         self._config = config
         self._group = group
 
-        self.
+        self._supervisor_states = supervisor_states
         self._event_callbacks = event_callbacks
 
         self._spawning = process_spawning_factory(self)
@@ -6774,7 +6607,7 @@ class ProcessImpl(Process):
         self._pipes = ProcessPipes()
 
         self._state = ProcessState.STOPPED
-        self._pid = 0  # 0 when not running
+        self._pid = Pid(0)  # 0 when not running
 
         self._last_start = 0.  # Last time the subprocess was started; 0 if never
         self._last_stop = 0.  # Last time the subprocess was stopped; 0 if never
@@ -6788,13 +6621,13 @@ class ProcessImpl(Process):
 
         self._backoff = 0  # backoff counter (to startretries)
 
-        self._exitstatus: ta.Optional[
+        self._exitstatus: ta.Optional[Rc] = None  # status attached to dead process by finish()
         self._spawn_err: ta.Optional[str] = None  # error message attached by spawn() if any
 
     #
 
     def __repr__(self) -> str:
-        return f'<Subprocess at {id(self)} with name {self._config.name} in state {self.
+        return f'<Subprocess at {id(self)} with name {self._config.name} in state {self._state.name}>'
 
     #
 
@@ -6811,15 +6644,11 @@ class ProcessImpl(Process):
         return self._group
 
     @property
-    def pid(self) ->
+    def pid(self) -> Pid:
         return self._pid
 
     #
 
-    @property
-    def context(self) -> ServerContext:
-        return self._context
-
     @property
     def state(self) -> ProcessState:
         return self._state
@@ -6830,11 +6659,9 @@ class ProcessImpl(Process):
 
     #
 
-    def spawn(self) -> ta.Optional[
-        process_name = as_string(self._config.name)
-
+    def spawn(self) -> ta.Optional[Pid]:
         if self.pid:
-            log.warning('process \'%s\' already running',
+            log.warning('process \'%s\' already running', self.name)
             return None
 
         self.check_in_state(
@@ -6884,7 +6711,7 @@ class ProcessImpl(Process):
         if stdin_fd is None:
             raise OSError(errno.EPIPE, 'Process has no stdin channel')
 
-        dispatcher = check_isinstance(self._dispatchers[stdin_fd],
+        dispatcher = check_isinstance(self._dispatchers[stdin_fd], ProcessInputDispatcher)
         if dispatcher.closed:
             raise OSError(errno.EPIPE, "Process' stdin channel is closed")
 
@@ -6957,7 +6784,7 @@ class ProcessImpl(Process):
         self._check_and_adjust_for_system_clock_rollback(now)
 
         if now > (self._last_stop_report + 2):  # every 2 seconds
-            log.info('waiting for %s to stop',
+            log.info('waiting for %s to stop', self.name)
             self._last_stop_report = now
 
     def give_up(self) -> None:
@@ -6977,18 +6804,17 @@ class ProcessImpl(Process):
         """
         now = time.time()
 
-        process_name = as_string(self._config.name)
         # If the process is in BACKOFF and we want to stop or kill it, then BACKOFF -> STOPPED. This is needed because
         # if startretries is a large number and the process isn't starting successfully, the stop request would be
        # blocked for a long time waiting for the retries.
         if self._state == ProcessState.BACKOFF:
-            log.debug('Attempted to kill %s, which is in BACKOFF state.',
+            log.debug('Attempted to kill %s, which is in BACKOFF state.', self.name)
             self.change_state(ProcessState.STOPPED)
             return None
 
         args: tuple
         if not self.pid:
-            fmt, args = "attempted to kill %s with sig %s but it wasn't running", (
+            fmt, args = "attempted to kill %s with sig %s but it wasn't running", (self.name, sig_name(sig))
             log.debug(fmt, *args)
             return fmt % args
 
@@ -7002,7 +6828,7 @@ class ProcessImpl(Process):
         if killasgroup:
             as_group = 'process group '
 
-        log.debug('killing %s (pid %s) %s with signal %s',
+        log.debug('killing %s (pid %s) %s with signal %s', self.name, self.pid, as_group, sig_name(sig))
 
         # RUNNING/STARTING/STOPPING -> STOPPING
         self._killing = True
@@ -7011,24 +6837,24 @@ class ProcessImpl(Process):
         self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
         self.change_state(ProcessState.STOPPING)
 
-
+        kpid = int(self.pid)
         if killasgroup:
             # send to the whole process group instead
-
+            kpid = -kpid
 
         try:
             try:
-                os.kill(
+                os.kill(kpid, sig)
             except OSError as exc:
                 if exc.errno == errno.ESRCH:
-                    log.debug('unable to signal %s (pid %s), it probably just exited on its own: %s',
+                    log.debug('unable to signal %s (pid %s), it probably just exited on its own: %s', self.name, self.pid, str(exc))  # noqa
                     # we could change the state here but we intentionally do not. we will do it during normal SIGCHLD
                     # processing.
                     return None
                 raise
         except Exception:  # noqa
             tb = traceback.format_exc()
-            fmt, args = 'unknown problem killing %s (%s):%s', (
+            fmt, args = 'unknown problem killing %s (%s):%s', (self.name, self.pid, tb)
             log.critical(fmt, *args)
             self.change_state(ProcessState.UNKNOWN)
             self._killing = False
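The kill() path above negates the pid when killasgroup is set, which targets the child's whole process group. A short illustration of that POSIX convention (hypothetical standalone code):

    import os
    import signal

    def signal_group(leader_pid: int, sig: int = signal.SIGTERM) -> None:
        # os.kill with a negative pid signals every process whose pgid == abs(pid);
        # this is equivalent to os.killpg(leader_pid, sig).
        os.kill(-leader_pid, sig)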
@@ -7044,14 +6870,13 @@ class ProcessImpl(Process):
         Return None if the signal was sent, or an error message string if an error occurred or if the subprocess is not
         running.
         """
-        process_name = as_string(self._config.name)
         args: tuple
         if not self.pid:
-            fmt, args = "
+            fmt, args = "Attempted to send %s sig %s but it wasn't running", (self.name, sig_name(sig))
             log.debug(fmt, *args)
             return fmt % args
 
-        log.debug('sending %s (pid %s) sig %s',
+        log.debug('sending %s (pid %s) sig %s', self.name, self.pid, sig_name(sig))
 
         self.check_in_state(ProcessState.RUNNING, ProcessState.STARTING, ProcessState.STOPPING)
 
@@ -7062,7 +6887,7 @@ class ProcessImpl(Process):
             if exc.errno == errno.ESRCH:
                 log.debug(
                     'unable to signal %s (pid %s), it probably just now exited on its own: %s',
-
+                    self.name,
                     self.pid,
                     str(exc),
                 )
@@ -7072,14 +6897,14 @@ class ProcessImpl(Process):
             raise
         except Exception:  # noqa
             tb = traceback.format_exc()
-            fmt, args = 'unknown problem sending sig %s (%s):%s', (
+            fmt, args = 'unknown problem sending sig %s (%s):%s', (self.name, self.pid, tb)
             log.critical(fmt, *args)
             self.change_state(ProcessState.UNKNOWN)
             return fmt % args
 
         return None
 
-    def finish(self, sts:
+    def finish(self, sts: Rc) -> None:
         """The process was reaped and we need to report and manage its state."""
 
         self._dispatchers.drain()
@@ -7091,7 +6916,6 @@ class ProcessImpl(Process):
         self._check_and_adjust_for_system_clock_rollback(now)
 
         self._last_stop = now
-        process_name = as_string(self._config.name)
 
         if now > self._last_start:
             too_quickly = now - self._last_start < self._config.startsecs
@@ -7100,7 +6924,7 @@ class ProcessImpl(Process):
             log.warning(
                 "process '%s' (%s) last_start time is in the future, don't know how long process was running so "
                 "assuming it did not exit too quickly",
-
+                self.name,
                 self.pid,
             )
 
@@ -7110,9 +6934,9 @@ class ProcessImpl(Process):
             # likely the result of a stop request implies STOPPING -> STOPPED
             self._killing = False
             self._delay = 0
-            self._exitstatus = es
+            self._exitstatus = Rc(es)
 
-            fmt, args = 'stopped: %s (%s)', (
+            fmt, args = 'stopped: %s (%s)', (self.name, msg)
             self.check_in_state(ProcessState.STOPPING)
             self.change_state(ProcessState.STOPPED)
             if exit_expected:
@@ -7126,7 +6950,7 @@ class ProcessImpl(Process):
             self._spawn_err = 'Exited too quickly (process log may have details)'
             self.check_in_state(ProcessState.STARTING)
             self.change_state(ProcessState.BACKOFF)
-            log.warning('exited: %s (%s)',
+            log.warning('exited: %s (%s)', self.name, msg + '; not expected')
 
         else:
             # this finish was not the result of a stop request, the program was in the RUNNING state but exited implies
@@ -7145,21 +6969,18 @@ class ProcessImpl(Process):
             if exit_expected:
                 # expected exit code
                 self.change_state(ProcessState.EXITED, expected=True)
-                log.info('exited: %s (%s)',
+                log.info('exited: %s (%s)', self.name, msg + '; expected')
             else:
                 # unexpected exit code
                 self._spawn_err = f'Bad exit code {es}'
                 self.change_state(ProcessState.EXITED, expected=False)
-                log.warning('exited: %s (%s)',
+                log.warning('exited: %s (%s)', self.name, msg + '; not expected')
 
-        self._pid = 0
+        self._pid = Pid(0)
         close_parent_pipes(self._pipes)
         self._pipes = ProcessPipes()
         self._dispatchers = Dispatchers([])
 
-    def get_state(self) -> ProcessState:
-        return self._state
-
     def transition(self) -> None:
         now = time.time()
         state = self._state
@@ -7168,7 +6989,7 @@ class ProcessImpl(Process):
 
         logger = log
 
-        if self.
+        if self._supervisor_states.state > SupervisorState.RESTARTING:
             # dont start any processes if supervisor is shutting down
             if state == ProcessState.EXITED:
                 if self._config.autorestart:
@@ -7190,7 +7011,6 @@ class ProcessImpl(Process):
                 # BACKOFF -> STARTING
                 self.spawn()
 
-        process_name = as_string(self._config.name)
         if state == ProcessState.STARTING:
             if now - self._last_start > self._config.startsecs:
                 # STARTING -> RUNNING if the proc has started successfully and it has stayed up for at least
@@ -7200,21 +7020,21 @@ class ProcessImpl(Process):
                 self.check_in_state(ProcessState.STARTING)
                 self.change_state(ProcessState.RUNNING)
                 msg = ('entered RUNNING state, process has stayed up for > than %s seconds (startsecs)' % self._config.startsecs)  # noqa
-                logger.info('success: %s %s',
+                logger.info('success: %s %s', self.name, msg)
 
         if state == ProcessState.BACKOFF:
             if self._backoff > self._config.startretries:
                 # BACKOFF -> FATAL if the proc has exceeded its number of retries
                 self.give_up()
                 msg = ('entered FATAL state, too many start retries too quickly')
-                logger.info('gave up: %s %s',
+                logger.info('gave up: %s %s', self.name, msg)
 
         elif state == ProcessState.STOPPING:
             time_left = self._delay - now
             if time_left <= 0:
                 # kill processes which are taking too long to stop with a final sigkill. if this doesn't kill it, the
                 # process will be stuck in the STOPPING state forever.
-                log.warning('killing \'%s\' (%s) with SIGKILL',
+                log.warning('killing \'%s\' (%s) with SIGKILL', self.name, self.pid)
                 self.kill(signal.SIGKILL)
 
     def after_setuid(self) -> None:
@@ -7233,15 +7053,15 @@ class ProcessImpl(Process):
 # ../spawningimpl.py
 
 
-class
+class ProcessOutputDispatcherFactory(Func3[Process, ta.Type[ProcessCommunicationEvent], Fd, ProcessOutputDispatcher]):
     pass
 
 
-class
+class ProcessInputDispatcherFactory(Func3[Process, str, Fd, ProcessInputDispatcher]):
     pass
 
 
-InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[
+InheritedFds = ta.NewType('InheritedFds', ta.FrozenSet[Fd])
 
 
 ##
@@ -7255,8 +7075,8 @@ class ProcessSpawningImpl(ProcessSpawning):
             server_config: ServerConfig,
             pid_history: PidHistory,
 
-            output_dispatcher_factory:
-            input_dispatcher_factory:
+            output_dispatcher_factory: ProcessOutputDispatcherFactory,
+            input_dispatcher_factory: ProcessInputDispatcherFactory,
 
             inherited_fds: ta.Optional[InheritedFds] = None,
     ) -> None:
@@ -7312,7 +7132,7 @@ class ProcessSpawningImpl(ProcessSpawning):
             raise ProcessSpawnError(f"Unknown error making dispatchers for '{self.process.name}': {exc}") from exc
 
         try:
-            pid = os.fork()
+            pid = Pid(os.fork())
         except OSError as exc:
             code = exc.args[0]
             if code == errno.EAGAIN:
@@ -7396,21 +7216,21 @@ class ProcessSpawningImpl(ProcessSpawning):
                 self.process,
                 ProcessCommunicationStdoutEvent,
                 pipes.stdout,
-            ),
+            ), ProcessOutputDispatcher))
 
         if pipes.stderr is not None:
             dispatchers.append(check_isinstance(self._output_dispatcher_factory(
                 self.process,
                 ProcessCommunicationStderrEvent,
                 pipes.stderr,
-            ),
+            ), ProcessOutputDispatcher))
 
         if pipes.stdin is not None:
             dispatchers.append(check_isinstance(self._input_dispatcher_factory(
                 self.process,
                 'stdin',
                 pipes.stdin,
-            ),
+            ), ProcessInputDispatcher))
 
         return Dispatchers(dispatchers)
 
@@ -7491,7 +7311,7 @@ class ProcessSpawningImpl(ProcessSpawning):
 
         finally:
             os.write(2, as_bytes('supervisor: child process was not spawned\n'))
-            real_exit(127)  # exit process with code for spawn failure
+            real_exit(Rc(127))  # exit process with code for spawn failure
 
         raise RuntimeError('Unreachable')
 
@@ -7508,7 +7328,7 @@ class ProcessSpawningImpl(ProcessSpawning):
         for i in range(3, self._server_config.minfds):
             if i in self._inherited_fds:
                 continue
-            close_fd(i)
+            close_fd(Fd(i))
 
     def _set_uid(self) -> ta.Optional[str]:
         if self.config.uid is None:
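The final hunk below re-adds the Supervisor loop as its own section, including the timeslice() helper that drives periodic tick events. A small worked example of how timeslice buckets timestamps (the function body mirrors the definition added below; the numbers are illustrative):

    def timeslice(period: int, when: float) -> int:
        return int(when - (when % period))

    # With a 60-second tick period, every timestamp inside the same minute maps to
    # the same slice, so a tick event fires only when the slice value changes:
    assert timeslice(60, 119.0) == 60
    assert timeslice(60, 120.0) == 120
    assert timeslice(60, 125.5) == 120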
@@ -7539,6 +7359,278 @@ def check_execv_args(
         raise NoPermissionError(f'No permission to run command {exe!r}')
 
 
+########################################
+# ../supervisor.py
+
+
+##
+
+
+def timeslice(period: int, when: float) -> int:
+    return int(when - (when % period))
+
+
+##
+
+
+class SupervisorStateManagerImpl(SupervisorStateManager):
+    def __init__(self) -> None:
+        super().__init__()
+
+        self._state: SupervisorState = SupervisorState.RUNNING
+
+    @property
+    def state(self) -> SupervisorState:
+        return self._state
+
+    def set_state(self, state: SupervisorState) -> None:
+        self._state = state
+
+
+##
+
+
+class ProcessGroupFactory(Func1[ProcessGroupConfig, ProcessGroup]):
+    pass
+
+
+class Supervisor:
+    def __init__(
+            self,
+            *,
+            config: ServerConfig,
+            poller: Poller,
+            process_groups: ProcessGroupManager,
+            signal_handler: SignalHandler,
+            event_callbacks: EventCallbacks,
+            process_group_factory: ProcessGroupFactory,
+            pid_history: PidHistory,
+            setup: SupervisorSetup,
+            states: SupervisorStateManager,
+            io: IoManager,
+    ) -> None:
+        super().__init__()
+
+        self._config = config
+        self._poller = poller
+        self._process_groups = process_groups
+        self._signal_handler = signal_handler
+        self._event_callbacks = event_callbacks
+        self._process_group_factory = process_group_factory
+        self._pid_history = pid_history
+        self._setup = setup
+        self._states = states
+        self._io = io
+
+        self._ticks: ta.Dict[int, float] = {}
+        self._stop_groups: ta.Optional[ta.List[ProcessGroup]] = None  # list used for priority ordered shutdown
+        self._stopping = False  # set after we detect that we are handling a stop request
+        self._last_shutdown_report = 0.  # throttle for delayed process error reports at stop
+
+    #
+
+    @property
+    def state(self) -> SupervisorState:
+        return self._states.state
+
+    #
+
+    def add_process_group(self, config: ProcessGroupConfig) -> bool:
+        if self._process_groups.get(config.name) is not None:
+            return False
+
+        group = check_isinstance(self._process_group_factory(config), ProcessGroup)
+        for process in group:
+            process.after_setuid()
+
+        self._process_groups.add(group)
+
+        return True
+
+    def remove_process_group(self, name: str) -> bool:
+        if self._process_groups[name].get_unstopped_processes():
+            return False
+
+        self._process_groups.remove(name)
+
+        return True
+
+    #
+
+    def shutdown_report(self) -> ta.List[Process]:
+        unstopped: ta.List[Process] = []
+
+        for group in self._process_groups:
+            unstopped.extend(group.get_unstopped_processes())
+
+        if unstopped:
+            # throttle 'waiting for x to die' reports
+            now = time.time()
+            if now > (self._last_shutdown_report + 3):  # every 3 secs
+                names = [p.config.name for p in unstopped]
+                namestr = ', '.join(names)
+                log.info('waiting for %s to die', namestr)
+                self._last_shutdown_report = now
+                for proc in unstopped:
+                    log.debug('%s state: %s', proc.config.name, proc.state.name)
+
+        return unstopped
+
+    #
+
+    def main(self, **kwargs: ta.Any) -> None:
+        self._setup.setup()
+        try:
+            self.run(**kwargs)
+        finally:
+            self._setup.cleanup()
+
+    def run(
+            self,
+            *,
+            callback: ta.Optional[ta.Callable[['Supervisor'], bool]] = None,
+    ) -> None:
+        self._process_groups.clear()
+        self._stop_groups = None  # clear
+
+        self._event_callbacks.clear()
+
+        try:
+            for config in self._config.groups or []:
+                self.add_process_group(config)
+
+            self._signal_handler.set_signals()
+
+            self._event_callbacks.notify(SupervisorRunningEvent())
+
+            while True:
+                if callback is not None and not callback(self):
+                    break
+
+                self._run_once()
+
+        finally:
+            self._poller.close()
+
+    #
+
+    def _run_once(self) -> None:
+        self._poll()
+        self._reap()
+        self._signal_handler.handle_signals()
+        self._tick()
+
+        if self._states.state < SupervisorState.RUNNING:
+            self._ordered_stop_groups_phase_2()
+
+    def _ordered_stop_groups_phase_1(self) -> None:
+        if self._stop_groups:
+            # stop the last group (the one with the "highest" priority)
+            self._stop_groups[-1].stop_all()
+
+    def _ordered_stop_groups_phase_2(self) -> None:
+        # after phase 1 we've transitioned and reaped, let's see if we can remove the group we stopped from the
+        # stop_groups queue.
+        if self._stop_groups:
+            # pop the last group (the one with the "highest" priority)
+            group = self._stop_groups.pop()
+            if group.get_unstopped_processes():
+                # if any processes in the group aren't yet in a stopped state, we're not yet done shutting this group
+                # down, so push it back on to the end of the stop group queue
+                self._stop_groups.append(group)
+
+    def _poll(self) -> None:
+        sorted_groups = list(self._process_groups)
+        sorted_groups.sort()
+
+        if self._states.state < SupervisorState.RUNNING:
+            if not self._stopping:
+                # first time, set the stopping flag, do a notification and set stop_groups
+                self._stopping = True
+                self._stop_groups = sorted_groups[:]
+                self._event_callbacks.notify(SupervisorStoppingEvent())
+
+            self._ordered_stop_groups_phase_1()
+
+            if not self.shutdown_report():
+                # if there are no unstopped processes (we're done killing everything), it's OK to shutdown or reload
+                raise ExitNow
+
+        self._io.poll()
+
+        for group in sorted_groups:
+            for process in group:
+                process.transition()
+
+    def _reap(self, *, once: bool = False, depth: int = 0) -> None:
+        if depth >= 100:
+            return
+
+        wp = waitpid()
+        if wp is None or not wp.pid:
+            return
+
+        process = self._pid_history.get(wp.pid, None)
+        if process is None:
+            _, msg = decode_wait_status(wp.sts)
+            log.info('reaped unknown pid %s (%s)', wp.pid, msg)
+        else:
+            process.finish(wp.sts)
+            del self._pid_history[wp.pid]
+
+        if not once:
+            # keep reaping until no more kids to reap, but don't recurse infinitely
+            self._reap(once=False, depth=depth + 1)
+
+    def _tick(self, now: ta.Optional[float] = None) -> None:
+        """Send one or more 'tick' events when the timeslice related to the period for the event type rolls over"""
+
+        if now is None:
+            # now won't be None in unit tests
+            now = time.time()
+
+        for event in TICK_EVENTS:
+            period = event.period
+
+            last_tick = self._ticks.get(period)
+            if last_tick is None:
+                # we just started up
+                last_tick = self._ticks[period] = timeslice(period, now)
+
+            this_tick = timeslice(period, now)
+            if this_tick != last_tick:
+                self._ticks[period] = this_tick
+                self._event_callbacks.notify(event(this_tick, self))
+
+
+##
+
+
+class WaitedPid(ta.NamedTuple):
+    pid: Pid
+    sts: Rc
+
+
+def waitpid() -> ta.Optional[WaitedPid]:
+    # Need pthread_sigmask here to avoid concurrent sigchld, but Python doesn't offer in Python < 3.4. There is
# still a race condition here; we can get a sigchld while we're sitting in the waitpid call. However, AFAICT, if
|
7617
|
+
# waitpid is interrupted by SIGCHLD, as long as we call waitpid again (which happens every so often during the
|
7618
|
+
# normal course in the mainloop), we'll eventually reap the child that we tried to reap during the interrupted
|
7619
|
+
# call. At least on Linux, this appears to be true, or at least stopping 50 processes at once never left zombies
|
7620
|
+
# lying around.
|
7621
|
+
try:
|
7622
|
+
pid, sts = os.waitpid(-1, os.WNOHANG)
|
7623
|
+
except OSError as exc:
|
7624
|
+
code = exc.args[0]
|
7625
|
+
if code not in (errno.ECHILD, errno.EINTR):
|
7626
|
+
log.critical('waitpid error %r; a process may not be cleaned up properly', code)
|
7627
|
+
if code == errno.EINTR:
|
7628
|
+
log.debug('EINTR during reap')
|
7629
|
+
return None
|
7630
|
+
else:
|
7631
|
+
return WaitedPid(pid, sts) # type: ignore
|
7632
|
+
|
7633
|
+
|
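The module-level `waitpid()` above performs a single non-blocking `os.waitpid(-1, os.WNOHANG)` call, and `Supervisor._reap()` keeps calling it (bounded by `depth`) until nothing is left to collect. A self-contained sketch of the same reaping idiom using only the standard library; the forked children at the bottom are purely illustrative:

```python
import errno
import os
import time


def reap_all_children() -> None:
    """Collect every exited child without blocking; stop when none are ready."""
    while True:
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
        except OSError as exc:
            if exc.errno == errno.ECHILD:
                return  # no children to wait for at all
            raise
        if pid == 0:
            return  # children exist, but none have exited yet
        print(f'reaped pid {pid} with wait status {status}')


if __name__ == '__main__':
    # Unix-only demo: fork a few children that exit immediately, then reap them.
    for _ in range(3):
        if os.fork() == 0:
            os._exit(0)
    time.sleep(0.1)
    reap_all_children()
```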
 ########################################
 # ../inject.py
 
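`Supervisor._tick()` in the block above fires a `TICK_EVENTS` notification whenever the clock crosses into a new period-aligned slice. A minimal sketch of that rollover test, assuming `timeslice()` floors a timestamp to its period boundary (its actual definition lives elsewhere in this file and may differ):

```python
import time
import typing as ta


def timeslice(period: int, when: float) -> int:
    # Assumed behavior: floor the timestamp to the start of its period-sized window.
    return int(when - (when % period))


_last_ticks: ta.Dict[int, int] = {}


def rolled_over(period: int, now: ta.Optional[float] = None) -> bool:
    """Return True when `now` has crossed into a new `period`-second slice."""
    if now is None:
        now = time.time()
    this_tick = timeslice(period, now)
    last_tick = _last_ticks.setdefault(period, this_tick)
    if this_tick == last_tick:
        return False
    _last_ticks[period] = this_tick
    return True


# A 5-second period: 1003.0 falls in the 1000 slice, 1006.0 starts the 1005 slice.
assert timeslice(5, 1003.0) == 1000
assert timeslice(5, 1006.0) == 1005
assert rolled_over(5, 1003.0) is False  # first call just primes the slice
assert rolled_over(5, 1006.0) is True
```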
@@ -7559,17 +7651,21 @@ def bind_server(
 
         inj.bind(DaemonizeListener, array=True, to_key=Poller),
 
-        inj.bind(ServerContextImpl, singleton=True),
-        inj.bind(ServerContext, to_key=ServerContextImpl),
-
         inj.bind(EventCallbacks, singleton=True),
 
         inj.bind(SignalReceiver, singleton=True),
 
+        inj.bind(IoManager, singleton=True),
+
         inj.bind(SignalHandler, singleton=True),
+
         inj.bind(ProcessGroupManager, singleton=True),
+
         inj.bind(Supervisor, singleton=True),
 
+        inj.bind(SupervisorStateManagerImpl, singleton=True),
+        inj.bind(SupervisorStateManager, to_key=SupervisorStateManagerImpl),
+
         inj.bind(PidHistory()),
 
         inj.bind_factory(ProcessGroupImpl, ProcessGroupFactory),
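The hunk above binds `SupervisorStateManagerImpl` as a singleton and then aliases the `SupervisorStateManager` interface to it, mirroring the `ServerContext` pair it replaces. The sketch below is not the project's injector API; it is a toy registry (all names illustrative) showing why resolving either key yields the same instance:

```python
import typing as ta


class TinyRegistry:
    """Toy stand-in for an injector: singleton providers plus interface aliases."""

    def __init__(self) -> None:
        self._providers: ta.Dict[type, ta.Callable[[], ta.Any]] = {}
        self._singletons: ta.Dict[type, ta.Any] = {}

    def bind_singleton(self, cls: type) -> None:
        self._providers[cls] = cls

    def bind_alias(self, iface: type, impl: type) -> None:
        # Asking for the interface resolves to whatever the implementation key yields.
        self._providers[iface] = lambda: self[impl]

    def __getitem__(self, key: type) -> ta.Any:
        if key not in self._singletons:
            self._singletons[key] = self._providers[key]()
        return self._singletons[key]


class StateManager:  # the "interface"
    pass


class StateManagerImpl(StateManager):  # the concrete singleton
    pass


reg = TinyRegistry()
reg.bind_singleton(StateManagerImpl)
reg.bind_alias(StateManager, StateManagerImpl)
assert reg[StateManager] is reg[StateManagerImpl]  # both names resolve to one instance
```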
@@ -7577,8 +7673,8 @@ def bind_server(
 
         inj.bind_factory(ProcessSpawningImpl, ProcessSpawningFactory),
 
-        inj.bind_factory(
-        inj.bind_factory(
+        inj.bind_factory(ProcessOutputDispatcherImpl, ProcessOutputDispatcherFactory),
+        inj.bind_factory(ProcessInputDispatcherImpl, ProcessInputDispatcherFactory),
     ]
 
     #
@@ -7635,7 +7731,7 @@ def main(
     if not no_logging:
         configure_standard_logging(
             'INFO',
-            handler_factory=journald_log_handler_factory if not args.no_journald else None,
+            handler_factory=journald_log_handler_factory if not (args.no_journald or is_debugger_attached()) else None,
         )
 
     #
@@ -7658,7 +7754,6 @@ def main(
             inherited_fds=inherited_fds,
         ))
 
-        context = injector[ServerContextImpl]
         supervisor = injector[Supervisor]
 
         try:
@@ -7666,7 +7761,7 @@ def main(
         except ExitNow:
             pass
 
-        if
+        if supervisor.state < SupervisorState.RESTARTING:
             break
 
 
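The last hunk swaps the restart check in `main()` from the removed `ServerContext` to `Supervisor.state` itself: after a run ends (normally or via `ExitNow`), the outer loop only breaks once the supervisor is no longer asking to restart. The surrounding loop is not shown in this diff, so the sketch below uses hypothetical names (`FakeSupervisor`, `serve`) and an assumed SHUTDOWN < RESTARTING < RUNNING ordering to illustrate the shape the check implies:

```python
import enum
import typing as ta


class SupervisorState(enum.IntEnum):
    # Hypothetical ordering; the real enum is defined elsewhere in this module.
    SHUTDOWN = -1
    RESTARTING = 0
    RUNNING = 1


class FakeSupervisor:
    """Stand-in that 'runs' once, then reports the state it was asked to end in."""

    def __init__(self, final_state: SupervisorState) -> None:
        self.state = SupervisorState.RUNNING
        self._final_state = final_state

    def main(self) -> None:
        self.state = self._final_state


def serve(supervisor_factory: ta.Callable[[], FakeSupervisor]) -> int:
    runs = 0
    while True:
        supervisor = supervisor_factory()
        runs += 1
        supervisor.main()
        if supervisor.state < SupervisorState.RESTARTING:
            break  # SHUTDOWN ends the loop; RESTARTING goes around again
    return runs


# First run asks for a restart, second run shuts down -> two iterations total.
final_states = iter([SupervisorState.RESTARTING, SupervisorState.SHUTDOWN])
assert serve(lambda: FakeSupervisor(next(final_states))) == 2
```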